From 2b694752d58efab0b6ece630a41fa8befc445b54 Mon Sep 17 00:00:00 2001 From: Shanshan Date: Fri, 12 Sep 2025 17:43:59 +0800 Subject: [PATCH 1/6] chore: clone docs for v1.0.1 --- docs/en/release-1_0_1/cli/_category_.yml | 5 + docs/en/release-1_0_1/cli/cli.mdx | 203 + docs/en/release-1_0_1/cli/kbcli.mdx | 74 + docs/en/release-1_0_1/cli/kbcli_addon.mdx | 52 + .../cli/kbcli_addon_describe.mdx | 46 + .../release-1_0_1/cli/kbcli_addon_disable.mdx | 63 + .../release-1_0_1/cli/kbcli_addon_enable.mdx | 94 + .../release-1_0_1/cli/kbcli_addon_index.mdx | 50 + .../cli/kbcli_addon_index_add.mdx | 56 + .../cli/kbcli_addon_index_delete.mdx | 50 + .../cli/kbcli_addon_index_list.mdx | 53 + .../cli/kbcli_addon_index_update.mdx | 57 + .../release-1_0_1/cli/kbcli_addon_install.mdx | 74 + .../en/release-1_0_1/cli/kbcli_addon_list.mdx | 51 + .../release-1_0_1/cli/kbcli_addon_purge.mdx | 63 + .../release-1_0_1/cli/kbcli_addon_search.mdx | 60 + .../cli/kbcli_addon_uninstall.mdx | 57 + .../release-1_0_1/cli/kbcli_addon_upgrade.mdx | 79 + .../en/release-1_0_1/cli/kbcli_backuprepo.mdx | 48 + .../cli/kbcli_backuprepo_create.mdx | 90 + .../cli/kbcli_backuprepo_delete.mdx | 53 + .../cli/kbcli_backuprepo_describe.mdx | 53 + ...kbcli_backuprepo_list-storage-provider.mdx | 56 + .../cli/kbcli_backuprepo_list.mdx | 57 + .../cli/kbcli_backuprepo_update.mdx | 60 + docs/en/release-1_0_1/cli/kbcli_cluster.mdx | 86 + .../cli/kbcli_cluster_backup.mdx | 71 + .../cli/kbcli_cluster_cancel-ops.mdx | 54 + .../cli/kbcli_cluster_configure.mdx | 71 + .../cli/kbcli_cluster_connect.mdx | 65 + .../cli/kbcli_cluster_convert-to-v1.mdx | 58 + .../cli/kbcli_cluster_create.mdx | 68 + .../kbcli_cluster_create_apecloud-mysql.mdx | 86 + .../cli/kbcli_cluster_create_etcd.mdx | 74 + .../cli/kbcli_cluster_create_kafka.mdx | 89 + .../cli/kbcli_cluster_create_mongodb.mdx | 74 + .../cli/kbcli_cluster_create_mysql.mdx | 78 + .../cli/kbcli_cluster_create_postgresql.mdx | 72 + .../cli/kbcli_cluster_create_qdrant.mdx | 72 + 
.../cli/kbcli_cluster_create_rabbitmq.mdx | 73 + .../cli/kbcli_cluster_create_redis.mdx | 92 + .../cli/kbcli_cluster_custom-ops.mdx | 62 + .../cli/kbcli_cluster_delete-backup.mdx | 60 + .../cli/kbcli_cluster_delete-ops.mdx | 63 + .../cli/kbcli_cluster_delete.mdx | 65 + .../kbcli_cluster_describe-backup-policy.mdx | 57 + .../cli/kbcli_cluster_describe-backup.mdx | 57 + .../cli/kbcli_cluster_describe-config.mdx | 65 + .../cli/kbcli_cluster_describe-ops.mdx | 53 + .../cli/kbcli_cluster_describe-restore.mdx | 53 + .../cli/kbcli_cluster_describe.mdx | 53 + .../cli/kbcli_cluster_diff-config.mdx | 53 + .../cli/kbcli_cluster_edit-backup-policy.mdx | 53 + .../cli/kbcli_cluster_edit-config.mdx | 67 + .../cli/kbcli_cluster_explain-config.mdx | 67 + .../cli/kbcli_cluster_expose.mdx | 71 + .../release-1_0_1/cli/kbcli_cluster_label.mdx | 73 + .../kbcli_cluster_list-backup-policies.mdx | 61 + .../cli/kbcli_cluster_list-backups.mdx | 64 + .../cli/kbcli_cluster_list-components.mdx | 58 + .../cli/kbcli_cluster_list-events.mdx | 58 + .../cli/kbcli_cluster_list-instances.mdx | 58 + .../cli/kbcli_cluster_list-logs.mdx | 61 + .../cli/kbcli_cluster_list-ops.mdx | 63 + .../cli/kbcli_cluster_list-restores.mdx | 64 + .../release-1_0_1/cli/kbcli_cluster_list.mdx | 70 + .../release-1_0_1/cli/kbcli_cluster_logs.mdx | 92 + .../cli/kbcli_cluster_promote.mdx | 62 + .../cli/kbcli_cluster_rebuild-instance.mdx | 75 + .../cli/kbcli_cluster_register.mdx | 64 + .../cli/kbcli_cluster_restart.mdx | 64 + .../cli/kbcli_cluster_restore.mdx | 63 + .../cli/kbcli_cluster_scale-in.mdx | 69 + .../cli/kbcli_cluster_scale-out.mdx | 69 + .../release-1_0_1/cli/kbcli_cluster_start.mdx | 63 + .../release-1_0_1/cli/kbcli_cluster_stop.mdx | 64 + .../cli/kbcli_cluster_update.mdx | 103 + .../cli/kbcli_cluster_upgrade-to-v1.mdx | 58 + .../cli/kbcli_cluster_upgrade.mdx | 69 + .../cli/kbcli_cluster_volume-expand.mdx | 63 + .../cli/kbcli_cluster_vscale.mdx | 67 + .../cli/kbcli_clusterdefinition.mdx | 44 + 
.../cli/kbcli_clusterdefinition_describe.mdx | 53 + .../cli/kbcli_clusterdefinition_list.mdx | 56 + .../cli/kbcli_componentdefinition.mdx | 44 + .../kbcli_componentdefinition_describe.mdx | 53 + .../cli/kbcli_componentdefinition_list.mdx | 59 + .../cli/kbcli_componentversion.mdx | 44 + .../cli/kbcli_componentversion_describe.mdx | 53 + .../cli/kbcli_componentversion_list.mdx | 59 + .../cli/kbcli_dataprotection.mdx | 54 + .../cli/kbcli_dataprotection_backup.mdx | 67 + .../kbcli_dataprotection_delete-backup.mdx | 59 + ..._dataprotection_describe-backup-policy.mdx | 56 + .../kbcli_dataprotection_describe-backup.mdx | 53 + .../kbcli_dataprotection_describe-restore.mdx | 53 + ...bcli_dataprotection_edit-backup-policy.mdx | 53 + .../kbcli_dataprotection_list-action-sets.mdx | 56 + ...li_dataprotection_list-backup-policies.mdx | 60 + ...rotection_list-backup-policy-templates.mdx | 56 + .../cli/kbcli_dataprotection_list-backups.mdx | 60 + .../kbcli_dataprotection_list-restores.mdx | 57 + .../cli/kbcli_dataprotection_restore.mdx | 57 + .../en/release-1_0_1/cli/kbcli_kubeblocks.mdx | 51 + .../cli/kbcli_kubeblocks_compare.mdx | 57 + .../cli/kbcli_kubeblocks_config.mdx | 58 + .../cli/kbcli_kubeblocks_describe-config.mdx | 60 + .../cli/kbcli_kubeblocks_install.mdx | 81 + .../cli/kbcli_kubeblocks_list-versions.mdx | 58 + .../cli/kbcli_kubeblocks_preflight.mdx | 72 + .../cli/kbcli_kubeblocks_status.mdx | 57 + .../cli/kbcli_kubeblocks_uninstall.mdx | 59 + .../cli/kbcli_kubeblocks_upgrade.mdx | 66 + .../cli/kbcli_ops-definition.mdx | 44 + .../cli/kbcli_ops-definition_describe.mdx | 53 + .../cli/kbcli_ops-definition_list.mdx | 60 + docs/en/release-1_0_1/cli/kbcli_options.mdx | 54 + .../en/release-1_0_1/cli/kbcli_playground.mdx | 44 + .../cli/kbcli_playground_destroy.mdx | 56 + .../cli/kbcli_playground_init.mdx | 87 + docs/en/release-1_0_1/cli/kbcli_plugin.mdx | 55 + .../cli/kbcli_plugin_describe.mdx | 56 + .../release-1_0_1/cli/kbcli_plugin_index.mdx | 50 + 
.../cli/kbcli_plugin_index_add.mdx | 55 + .../cli/kbcli_plugin_index_delete.mdx | 53 + .../cli/kbcli_plugin_index_list.mdx | 53 + .../cli/kbcli_plugin_index_update.mdx | 46 + .../cli/kbcli_plugin_install.mdx | 56 + .../release-1_0_1/cli/kbcli_plugin_list.mdx | 53 + .../release-1_0_1/cli/kbcli_plugin_search.mdx | 58 + .../cli/kbcli_plugin_uninstall.mdx | 53 + .../cli/kbcli_plugin_upgrade.mdx | 57 + docs/en/release-1_0_1/cli/kbcli_report.mdx | 44 + .../cli/kbcli_report_cluster.mdx | 79 + .../cli/kbcli_report_kubeblocks.mdx | 70 + docs/en/release-1_0_1/cli/kbcli_trace.mdx | 47 + .../release-1_0_1/cli/kbcli_trace_create.mdx | 63 + .../release-1_0_1/cli/kbcli_trace_delete.mdx | 53 + .../en/release-1_0_1/cli/kbcli_trace_list.mdx | 57 + .../release-1_0_1/cli/kbcli_trace_update.mdx | 62 + .../release-1_0_1/cli/kbcli_trace_watch.mdx | 53 + docs/en/release-1_0_1/cli/kbcli_version.mdx | 47 + .../01-overview.mdx | 66 + .../02-quickstart.mdx | 325 + .../04-operations/01-stop-start-restart.mdx | 307 + .../04-operations/02-vertical-scaling.mdx | 176 + .../04-operations/03-horizontal-scaling.mdx | 234 + .../04-operations/04-volume-expansion.mdx | 251 + .../04-operations/05-manage-loadbalancer.mdx | 306 + .../09-decommission-a-specific-replica.mdx | 134 + .../04-operations/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 197 + .../08-monitoring/_category_.yml | 4 + .../_category_.yml | 4 + .../_tpl/_category_.yml | 5 + .../_tpl/_create-cluster.mdx | 66 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-cluster.mdx | 36 + .../kubeblocks-for-kafka/01-overview.mdx | 43 + .../kubeblocks-for-kafka/02-quickstart.mdx | 472 + .../04-operations/01-stop-start-restart.mdx | 310 + .../04-operations/02-vertical-scaling.mdx | 174 + .../04-operations/03-horizontal-scaling.mdx | 227 + .../04-operations/04-volume-expansion.mdx | 258 + .../04-operations/05-manage-loadbalancer.mdx | 303 + .../09-decommission-a-specific-replica.mdx | 163 + .../04-operations/_category_.yml | 4 + 
.../01-integrate-with-prometheus-operator.mdx | 248 + .../08-monitoring/_category_.yml | 4 + .../kubeblocks-for-kafka/_category_.yml | 4 + .../kubeblocks-for-kafka/_tpl/_category_.yml | 5 + .../_tpl/_create-cluster.mdx | 84 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-cluster.mdx | 33 + .../kubeblocks-for-milvus/01-overview.mdx | 84 + .../kubeblocks-for-milvus/02-quickstart.mdx | 436 + .../03-topologies/01-standlone.mdx | 135 + .../03-topologies/02-cluster.mdx | 522 + .../03-topologies/_category_.yml | 4 + .../04-operations/01-stop-start-restart.mdx | 257 + .../04-operations/02-vertical-scaling.mdx | 189 + .../04-operations/03-horizontal-scaling.mdx | 240 + .../04-operations/05-manage-loadbalancer.mdx | 295 + .../09-decommission-a-specific-replica.mdx | 143 + .../04-operations/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 227 + .../08-monitoring/_category_.yml | 4 + .../kubeblocks-for-milvus/_category_.yml | 4 + .../kubeblocks-for-milvus/_tpl/_category_.yml | 5 + .../_tpl/_create-cluster.mdx | 36 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-cluster.mdx | 48 + .../kubeblocks-for-mongodb/01-overview.mdx | 73 + .../kubeblocks-for-mongodb/02-quickstart.mdx | 544 + .../04-operations/01-stop-start-restart.mdx | 283 + .../04-operations/02-vertical-scaling.mdx | 174 + .../04-operations/03-horizontal-scaling.mdx | 271 + .../04-operations/04-volume-expansion.mdx | 218 + .../04-operations/05-manage-loadbalancer.mdx | 333 + .../04-operations/08-switchover.mdx | 181 + .../09-decommission-a-specific-replica.mdx | 139 + .../04-operations/_category_.yml | 4 + .../01-create-backuprepo.mdx | 125 + .../02-create-full-backup.mdx | 223 + .../03-scheduled-full-backup.mdx | 150 + .../04-scheduled-continuous-backup.mdx | 181 + .../05-restoring-from-full-backup.mdx | 161 + .../06-restore-with-pitr.mdx | 181 + .../05-backup-restore/_category_.yml | 4 + .../06-custom-secret/01-custom-secret.mdx | 135 + .../06-custom-secret/_category_.yml | 4 + 
.../kubeblocks-for-mongodb/_category_.yml | 4 + .../_tpl/_category_.yml | 5 + .../_tpl/_create-cluster.mdx | 36 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-cluster.mdx | 33 + .../kubeblocks-for-mysql/01-overview.mdx | 73 + .../kubeblocks-for-mysql/02-quickstart.mdx | 356 + .../03-topologies/01-semisync.mdx | 322 + .../02-semisync-with-proxysql.mdx | 260 + .../03-topologies/03-mgr.mdx | 209 + .../03-topologies/04-mgr-with-proxysql.mdx | 241 + .../03-topologies/05-orchestrator.mdx | 370 + .../06-orchestrator-with-proxysql.mdx | 375 + .../03-topologies/_category_.yml | 4 + .../04-operations/01-stop_start_restart.mdx | 267 + .../04-operations/02-vertical-scaling.mdx | 207 + .../04-operations/03-horizontal-scaling.mdx | 251 + .../04-operations/04-volume-expansion.mdx | 235 + .../04-operations/05-manage-loadbalancer.mdx | 411 + .../06-minior-version-upgrade.mdx | 216 + .../04-operations/07-modify-parameters.mdx | 235 + .../04-operations/08-switchover.mdx | 173 + .../09-decommission-a-specific-replica.mdx | 215 + .../04-operations/11-rebuild-replica.mdx | 399 + .../04-operations/_category_.yml | 4 + .../01-create-backuprepo.mdx | 126 + .../02-create-full-backup.mdx | 257 + .../03-scheduled-full-backup.mdx | 158 + .../04-scheduled-continuous-backup.mdx | 163 + .../05-restoring-from-full-backup.mdx | 167 + .../06-restore-with-pitr.mdx | 163 + .../05-backup-restore/_category_.yml | 4 + .../06-custom-secret/01-custom-secret.mdx | 143 + .../02-custom-password-generation-policy.mdx | 131 + .../06-custom-secret/_category_.yml | 4 + .../07-tls/01-tls-overview.mdx | 137 + .../07-tls/02-tls-custom-cert.mdx | 192 + .../kubeblocks-for-mysql/07-tls/03-mtls.mdx | 261 + .../07-tls/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 250 + .../08-monitoring/_category_.yml | 4 + .../01-custom-scheduling-policy.mdx | 246 + .../02-custom-pod-resources.mdx | 290 + ...03-parallel-pod-management-concurrency.mdx | 291 + .../04-instance-update-strategy-ondelete.mdx | 
184 + .../05-gradual-rolling-update.mdx | 205 + .../09-advanced-pod-management/_category_.yml | 4 + .../kubeblocks-for-mysql/_category_.yml | 4 + .../kubeblocks-for-postgresql/01-overview.mdx | 82 + .../02-quickstart.mdx | 579 + .../04-operations/01-stop-start-restart.mdx | 283 + .../04-operations/02-vertical-scaling.mdx | 189 + .../04-operations/03-horizontal-scaling.mdx | 293 + .../04-operations/04-volume-expansion.mdx | 237 + .../04-operations/05-manage-loadbalancer.mdx | 386 + .../06-minior-version-upgrade.mdx | 293 + .../04-operations/07-modify-parameters.mdx | 258 + .../04-operations/08-switchover.mdx | 179 + .../09-decommission-a-specific-replica.mdx | 154 + .../04-operations/11-rebuild-replica.mdx | 320 + .../04-operations/_category_.yml | 4 + .../01-create-backuprepo.mdx | 163 + .../02-create-full-backup.mdx | 261 + .../03-scheduled-full-backup.mdx | 222 + .../04-scheduled-continuous-backup.mdx | 424 + .../05-restoring-from-full-backup.mdx | 210 + .../06-restore-with-pitr.mdx | 245 + .../05-backup-restore/_category_.yml | 4 + .../06-custom-secret/01-custom-secret.mdx | 139 + .../02-custom-password-generation-policy.mdx | 116 + .../06-custom-secret/_category_.yml | 4 + .../07-tls/01-tls-overview.mdx | 194 + .../07-tls/02-tls-custom-cert.mdx | 200 + .../07-tls/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 263 + .../08-monitoring/_category_.yml | 4 + .../kubeblocks-for-postgresql/09-faqs.mdx | 71 + .../kubeblocks-for-postgresql/_category_.yml | 4 + .../_tpl/_category_.yml | 5 + .../_tpl/_create-pg-replication-cluster.mdx | 35 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-pg-replication-cluster.mdx | 18 + .../kubeblocks-for-qdrant/01-overview.mdx | 57 + .../kubeblocks-for-qdrant/02-quickstart.mdx | 446 + .../04-operations/01-stop-start-restart.mdx | 281 + .../04-operations/02-vertical-scaling.mdx | 174 + .../04-operations/03-horizontal-scaling.mdx | 279 + .../04-operations/04-volume-expansion.mdx | 218 + 
.../04-operations/05-manage-loadbalancer.mdx | 297 + .../06-minior-version-upgrade.mdx | 271 + .../09-decommission-a-specific-replica.mdx | 134 + .../04-operations/_category_.yml | 4 + .../01-create-backuprepo.mdx | 125 + .../02-create-full-backup.mdx | 217 + .../03-scheduled-full-backup.mdx | 150 + .../05-restoring-from-full-backup.mdx | 161 + .../05-backup-restore/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 246 + .../08-monitoring/_category_.yml | 4 + .../kubeblocks-for-qdrant/_category_.yml | 4 + .../kubeblocks-for-qdrant/_tpl/_category_.yml | 5 + .../_tpl/_create-cluster.mdx | 36 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-cluster.mdx | 33 + .../kubeblocks-for-rabbitmq/01-overview.mdx | 51 + .../kubeblocks-for-rabbitmq/02-quickstart.mdx | 484 + .../04-operations/01-stop-start-restart.mdx | 281 + .../04-operations/02-vertical-scaling.mdx | 174 + .../04-operations/03-horizontal-scaling.mdx | 236 + .../04-operations/04-volume-expansion.mdx | 218 + .../04-operations/05-manage-loadbalancer.mdx | 317 + .../09-decommission-a-specific-replica.mdx | 151 + .../04-operations/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 226 + .../08-monitoring/_category_.yml | 4 + .../kubeblocks-for-rabbitmq/_category_.yml | 4 + .../_tpl/_category_.yml | 5 + .../_tpl/_create-cluster.mdx | 36 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-cluster.mdx | 33 + .../kubeblocks-for-redis/01-overview.mdx | 68 + .../kubeblocks-for-redis/02-quickstart.mdx | 557 + .../03-topologies/01-standlone.mdx | 93 + .../03-topologies/02-replication.mdx | 130 + .../03-topologies/03-sharding.mdx | 236 + .../03-topologies/_category_.yml | 4 + .../04-operations/01-stop-start-restart.mdx | 312 + .../04-operations/02-vertical-scaling.mdx | 178 + .../04-operations/03-horizontal-scaling.mdx | 279 + .../04-operations/04-volume-expansion.mdx | 237 + .../04-operations/05-manage-loadbalancer.mdx | 330 + .../04-operations/07-modify-parameters.mdx | 126 + 
.../04-operations/08-switchover.mdx | 179 + .../09-decommission-a-specific-replica.mdx | 131 + .../04-operations/_category_.yml | 4 + .../01-create-backuprepo.mdx | 125 + .../02-create-full-backup.mdx | 215 + .../03-scheduled-full-backup.mdx | 150 + .../04-scheduled-continuous-backup.mdx | 160 + .../05-restoring-from-full-backup.mdx | 179 + .../06-restore-with-pitr.mdx | 200 + .../05-backup-restore/_category_.yml | 4 + .../06-custom-secret/01-custom-secret.mdx | 154 + .../06-custom-secret/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 263 + .../08-monitoring/_category_.yml | 4 + .../kubeblocks-for-redis/09-faqs.mdx | 114 + .../kubeblocks-for-redis/_category_.yml | 4 + .../kubeblocks-for-redis/_tpl/_category_.yml | 5 + .../_create-redis-replication-cluster.mdx | 55 + .../_tpl/_prerequisites.mdx | 11 + .../_verify-redis-replication-cluster.mdx | 33 + .../01-introduction.mdx | 35 + .../kubeblocks-for-starrocks/02-provision.mdx | 92 + .../kubeblocks-for-starrocks/03-scale.mdx | 348 + .../04-stop-and-start.mdx | 197 + .../kubeblocks-for-starrocks/05-restart.mdx | 77 + .../06-expand-volume.mdx | 165 + .../kubeblocks-for-starrocks/10-delete.mdx | 82 + .../kubeblocks-for-starrocks/_category_.yml | 4 + .../release_notes/kbcli_template.mdx | 3 + .../release-1_0_1/release_notes/template.mdx | 192 + .../release_notes/v0.1.0/_category_.yml | 4 + .../release_notes/v0.1.0/template.mdx | 168 + .../release_notes/v0.1.0/v0.1.0.mdx | 188 + .../release_notes/v0.2.0/_category_.yml | 4 + .../release_notes/v0.2.0/v0.2.0.mdx | 146 + .../release_notes/v0.3.0/_category_.yml | 4 + .../release_notes/v0.3.0/v0.3.0.mdx | 151 + .../release_notes/v0.3.0/v0.3.1.mdx | 10 + .../release_notes/v0.3.0/v0.3.2.mdx | 16 + .../release_notes/v0.3.0/v0.3.3.mdx | 10 + .../release_notes/v0.3.0/v0.3.4.mdx | 11 + .../release_notes/v0.3.0/v0.3.5.mdx | 12 + .../release_notes/v0.3.0/v0.3.6.mdx | 10 + .../release_notes/v0.3.0/v0.3.7.mdx | 18 + .../release_notes/v0.3.0/v0.3.8.mdx | 13 + 
.../release_notes/v0.4.0/_category_.yml | 4 + .../release_notes/v0.4.0/v0.4.0.mdx | 80 + .../release_notes/v0.5.0/_category_.yml | 4 + .../release_notes/v0.5.0/v0.5.0.mdx | 271 + .../release_notes/v0.6.0/_category_.yml | 4 + .../release_notes/v0.6.0/v0.6.0.mdx | 269 + .../release_notes/v0.7.0/_category_.yml | 4 + .../release_notes/v0.7.0/v0.7.0.mdx | 79 + .../release_notes/v0.8.0/_category_.yml | 4 + .../release_notes/v0.8.0/v0.8.0.mdx | 101 + .../release_notes/v0.9.0/0.9.0.mdx | 111 + .../release_notes/v0.9.0/_category_.yml | 4 + .../user_docs/concepts/_category_.yml | 4 + .../backup-and-restore/_category_.yaml | 4 + .../backup-and-restore/backup/_category_.yaml | 4 + .../backup-and-restore/backup/backup-repo.mdx | 648 + .../backup/configure-backuppolicy.mdx | 172 + .../backup/on-demand-backup.mdx | 138 + .../backup/scheduled-backup.mdx | 86 + .../backup-and-restore/introduction.mdx | 31 + .../restore/_category_.yaml | 4 + .../backup-and-restore/restore/pitr.mdx | 115 + .../restore/restore-data-from-backup-set.mdx | 59 + .../user_docs/concepts/concept.mdx | 160 + .../concepts/in-place-update/_category_.yaml | 4 + .../in-place-update/ignore-vertical-scale.mdx | 13 + .../concepts/in-place-update/overview.mdx | 54 + .../concepts/instance-template/_category_.yml | 4 + .../how-to-use-instance-template.mdx | 206 + .../instance-template/introduction.mdx | 27 + .../user_docs/overview/_category_.yml | 4 + .../user_docs/overview/install-kubeblocks.mdx | 540 + .../user_docs/overview/introduction.mdx | 242 + .../user_docs/overview/supported-addons.mdx | 216 + .../user_docs/references/_category_.yml | 4 + .../references/api-reference/_category_.yml | 4 + .../references/api-reference/add-on.mdx | 2545 + .../references/api-reference/cluster.mdx | 57618 ++++++++++++++++ .../api-reference/dataprotection.mdx | 11576 ++++ .../references/api-reference/operations.mdx | 7998 +++ .../references/api-reference/parameters.mdx | 5770 ++ .../user_docs/references/install-addons.mdx | 327 + 
.../user_docs/references/install-kbcli.mdx | 285 + .../user_docs/references/install-minio.mdx | 52 + .../install-snapshot-controller.mdx | 76 + .../references/kubeblocks_options.mdx | 217 + .../kubernetes_and_operator_101.mdx | 116 + .../prepare-a-local-k8s-cluster.mdx | 257 + .../user_docs/references/terminology.mdx | 98 + .../user_docs/release_notes/_category_.yml | 4 + .../release_notes/release-09/090.mdx | 117 + .../release_notes/release-09/091.mdx | 98 + .../release_notes/release-09/092.mdx | 55 + .../release_notes/release-09/093.mdx | 82 + .../release_notes/release-09/094.mdx | 75 + .../release_notes/release-09/095.mdx | 67 + .../release_notes/release-09/_category_.yml | 4 + .../release_notes/release-10/100-cn.mdx | 187 + .../release_notes/release-10/100.mdx | 187 + .../release_notes/release-10/_category_.yml | 4 + .../user_docs/troubleshooting/_category_.yml | 4 + .../handle-a-cluster-exception.mdx | 210 + .../troubleshooting/known-issues.mdx | 91 + .../user_docs/upgrade/_category_.yml | 5 + .../user_docs/upgrade/upgrade-to-0_8.mdx | 115 + .../user_docs/upgrade/upgrade-to-0_9_0.mdx | 171 + .../upgrade/upgrade-to-v09-version.mdx | 413 + package.json | 2 + .../docs/[version]/[category]/layout.tsx | 17 + .../docs/[version]/[category]/version.tsx | 2 +- src/app/api/search-index/route.ts | 40 +- src/constants/versions.ts | 19 +- yarn.lock | 5 + 461 files changed, 137908 insertions(+), 30 deletions(-) create mode 100644 docs/en/release-1_0_1/cli/_category_.yml create mode 100644 docs/en/release-1_0_1/cli/cli.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_describe.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_disable.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_enable.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_index.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_index_add.mdx 
create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_index_delete.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_index_list.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_index_update.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_install.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_list.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_purge.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_search.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_uninstall.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_addon_upgrade.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_backuprepo.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_backuprepo_create.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_backuprepo_delete.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_backuprepo_describe.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_backuprepo_list-storage-provider.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_backuprepo_list.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_backuprepo_update.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_backup.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_cancel-ops.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_configure.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_connect.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_convert-to-v1.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_create.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_create_apecloud-mysql.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_create_etcd.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_create_kafka.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_create_mongodb.mdx create mode 100644 
docs/en/release-1_0_1/cli/kbcli_cluster_create_mysql.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_create_postgresql.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_create_qdrant.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_create_rabbitmq.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_create_redis.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_delete-backup.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_delete-ops.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_delete.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_describe-backup-policy.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_describe-backup.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_describe-config.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_describe-ops.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_describe-restore.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_describe.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_diff-config.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_edit-backup-policy.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_edit-config.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_explain-config.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_expose.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_label.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_list-backup-policies.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_list-backups.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_list-components.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_list-events.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_list-instances.mdx create mode 100644 
docs/en/release-1_0_1/cli/kbcli_cluster_list-logs.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_list-ops.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_list-restores.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_list.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_logs.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_promote.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_rebuild-instance.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_register.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_restart.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_restore.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_scale-in.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_scale-out.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_start.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_stop.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_update.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_upgrade-to-v1.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_upgrade.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_volume-expand.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_vscale.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_clusterdefinition.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_clusterdefinition_describe.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_clusterdefinition_list.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_componentdefinition.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_componentdefinition_describe.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_componentdefinition_list.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_componentversion.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_componentversion_describe.mdx create mode 100644 
docs/en/release-1_0_1/cli/kbcli_componentversion_list.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection_backup.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection_delete-backup.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection_describe-backup-policy.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection_describe-backup.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection_describe-restore.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection_edit-backup-policy.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection_list-action-sets.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection_list-backup-policies.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection_list-backup-policy-templates.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection_list-backups.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection_list-restores.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_dataprotection_restore.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_kubeblocks.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_kubeblocks_compare.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_kubeblocks_config.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_kubeblocks_describe-config.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_kubeblocks_install.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_kubeblocks_list-versions.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_kubeblocks_preflight.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_kubeblocks_status.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_kubeblocks_uninstall.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_kubeblocks_upgrade.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_ops-definition.mdx create mode 
100644 docs/en/release-1_0_1/cli/kbcli_ops-definition_describe.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_ops-definition_list.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_options.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_playground.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_playground_destroy.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_playground_init.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_plugin.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_plugin_describe.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_plugin_index.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_plugin_index_add.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_plugin_index_delete.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_plugin_index_list.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_plugin_index_update.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_plugin_install.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_plugin_list.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_plugin_search.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_plugin_uninstall.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_plugin_upgrade.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_report.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_report_cluster.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_report_kubeblocks.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_trace.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_trace_create.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_trace_delete.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_trace_list.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_trace_update.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_trace_watch.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_version.mdx create mode 100644 
docs/en/release-1_0_1/kubeblocks-for-elasticsearch/01-overview.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/02-quickstart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/01-stop-start-restart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/02-vertical-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/04-volume-expansion.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/08-monitoring/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_create-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_prerequisites.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_verify-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/01-overview.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/02-quickstart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/01-stop-start-restart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/02-vertical-scaling.mdx create mode 100644 
docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/04-volume-expansion.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/08-monitoring/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_create-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_prerequisites.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_verify-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/01-overview.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/02-quickstart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/03-topologies/01-standlone.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/03-topologies/02-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/03-topologies/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/01-stop-start-restart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/02-vertical-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/05-manage-loadbalancer.mdx create mode 100644 
docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/08-monitoring/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_create-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_prerequisites.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_verify-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/01-overview.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/02-quickstart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/01-stop-start-restart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/02-vertical-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/04-volume-expansion.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/08-switchover.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/01-create-backuprepo.mdx create mode 100644 
docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/02-create-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/03-scheduled-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/04-scheduled-continuous-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/05-restoring-from-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/06-restore-with-pitr.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/06-custom-secret/01-custom-secret.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/06-custom-secret/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_create-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_prerequisites.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_verify-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/01-overview.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/02-quickstart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/01-semisync.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/02-semisync-with-proxysql.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/03-mgr.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/04-mgr-with-proxysql.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/05-orchestrator.mdx create mode 100644 
docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/06-orchestrator-with-proxysql.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/01-stop_start_restart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/02-vertical-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/04-volume-expansion.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/06-minior-version-upgrade.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/07-modify-parameters.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/08-switchover.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/11-rebuild-replica.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/01-create-backuprepo.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/02-create-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/03-scheduled-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/04-scheduled-continuous-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/05-restoring-from-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/06-restore-with-pitr.mdx create mode 100644 
docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/06-custom-secret/01-custom-secret.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/06-custom-secret/02-custom-password-generation-policy.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/06-custom-secret/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/01-tls-overview.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/02-tls-custom-cert.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/03-mtls.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/08-monitoring/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/09-advanced-pod-management/01-custom-scheduling-policy.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/09-advanced-pod-management/02-custom-pod-resources.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/09-advanced-pod-management/03-parallel-pod-management-concurrency.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/09-advanced-pod-management/04-instance-update-strategy-ondelete.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/09-advanced-pod-management/05-gradual-rolling-update.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/09-advanced-pod-management/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-mysql/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/01-overview.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/02-quickstart.mdx create mode 100644 
docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/01-stop-start-restart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/02-vertical-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/04-volume-expansion.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/06-minior-version-upgrade.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/07-modify-parameters.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/08-switchover.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/11-rebuild-replica.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/01-create-backuprepo.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/02-create-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/03-scheduled-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/04-scheduled-continuous-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/05-restoring-from-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/06-restore-with-pitr.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/_category_.yml create mode 100644 
docs/en/release-1_0_1/kubeblocks-for-postgresql/06-custom-secret/01-custom-secret.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/06-custom-secret/02-custom-password-generation-policy.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/06-custom-secret/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/07-tls/01-tls-overview.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/07-tls/02-tls-custom-cert.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/07-tls/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/08-monitoring/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/09-faqs.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_create-pg-replication-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_prerequisites.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_verify-pg-replication-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/01-overview.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/02-quickstart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/01-stop-start-restart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/02-vertical-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/04-volume-expansion.mdx create mode 100644 
docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/06-minior-version-upgrade.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/01-create-backuprepo.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/02-create-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/03-scheduled-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/05-restoring-from-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/08-monitoring/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_create-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_prerequisites.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_verify-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/01-overview.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/02-quickstart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/01-stop-start-restart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/02-vertical-scaling.mdx create mode 100644 
docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/04-volume-expansion.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/08-monitoring/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_create-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_prerequisites.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_verify-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/01-overview.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/02-quickstart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/01-standlone.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/02-replication.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/03-sharding.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/01-stop-start-restart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/02-vertical-scaling.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/03-horizontal-scaling.mdx create 
mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/04-volume-expansion.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/07-modify-parameters.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/08-switchover.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/01-create-backuprepo.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/02-create-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/03-scheduled-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/04-scheduled-continuous-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/05-restoring-from-full-backup.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/06-custom-secret/01-custom-secret.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/06-custom-secret/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/08-monitoring/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/09-faqs.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/_category_.yml create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_category_.yml create mode 
100644 docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_create-redis-replication-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_prerequisites.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_verify-redis-replication-cluster.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-starrocks/01-introduction.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-starrocks/02-provision.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-starrocks/03-scale.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-starrocks/04-stop-and-start.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-starrocks/05-restart.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-starrocks/06-expand-volume.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-starrocks/10-delete.mdx create mode 100644 docs/en/release-1_0_1/kubeblocks-for-starrocks/_category_.yml create mode 100644 docs/en/release-1_0_1/release_notes/kbcli_template.mdx create mode 100644 docs/en/release-1_0_1/release_notes/template.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.1.0/_category_.yml create mode 100644 docs/en/release-1_0_1/release_notes/v0.1.0/template.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.1.0/v0.1.0.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.2.0/_category_.yml create mode 100644 docs/en/release-1_0_1/release_notes/v0.2.0/v0.2.0.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.3.0/_category_.yml create mode 100644 docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.0.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.1.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.2.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.3.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.4.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.5.mdx create mode 100644 
docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.6.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.7.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.8.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.4.0/_category_.yml create mode 100644 docs/en/release-1_0_1/release_notes/v0.4.0/v0.4.0.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.5.0/_category_.yml create mode 100644 docs/en/release-1_0_1/release_notes/v0.5.0/v0.5.0.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.6.0/_category_.yml create mode 100644 docs/en/release-1_0_1/release_notes/v0.6.0/v0.6.0.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.7.0/_category_.yml create mode 100644 docs/en/release-1_0_1/release_notes/v0.7.0/v0.7.0.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.8.0/_category_.yml create mode 100644 docs/en/release-1_0_1/release_notes/v0.8.0/v0.8.0.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.9.0/0.9.0.mdx create mode 100644 docs/en/release-1_0_1/release_notes/v0.9.0/_category_.yml create mode 100644 docs/en/release-1_0_1/user_docs/concepts/_category_.yml create mode 100644 docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/_category_.yaml create mode 100644 docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/_category_.yaml create mode 100644 docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/backup-repo.mdx create mode 100644 docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/configure-backuppolicy.mdx create mode 100644 docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/on-demand-backup.mdx create mode 100644 docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/scheduled-backup.mdx create mode 100644 docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/introduction.mdx create mode 100644 docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/restore/_category_.yaml 
create mode 100644 docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/restore/pitr.mdx create mode 100644 docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/restore/restore-data-from-backup-set.mdx create mode 100644 docs/en/release-1_0_1/user_docs/concepts/concept.mdx create mode 100644 docs/en/release-1_0_1/user_docs/concepts/in-place-update/_category_.yaml create mode 100644 docs/en/release-1_0_1/user_docs/concepts/in-place-update/ignore-vertical-scale.mdx create mode 100644 docs/en/release-1_0_1/user_docs/concepts/in-place-update/overview.mdx create mode 100644 docs/en/release-1_0_1/user_docs/concepts/instance-template/_category_.yml create mode 100644 docs/en/release-1_0_1/user_docs/concepts/instance-template/how-to-use-instance-template.mdx create mode 100644 docs/en/release-1_0_1/user_docs/concepts/instance-template/introduction.mdx create mode 100644 docs/en/release-1_0_1/user_docs/overview/_category_.yml create mode 100644 docs/en/release-1_0_1/user_docs/overview/install-kubeblocks.mdx create mode 100644 docs/en/release-1_0_1/user_docs/overview/introduction.mdx create mode 100644 docs/en/release-1_0_1/user_docs/overview/supported-addons.mdx create mode 100644 docs/en/release-1_0_1/user_docs/references/_category_.yml create mode 100644 docs/en/release-1_0_1/user_docs/references/api-reference/_category_.yml create mode 100644 docs/en/release-1_0_1/user_docs/references/api-reference/add-on.mdx create mode 100644 docs/en/release-1_0_1/user_docs/references/api-reference/cluster.mdx create mode 100644 docs/en/release-1_0_1/user_docs/references/api-reference/dataprotection.mdx create mode 100644 docs/en/release-1_0_1/user_docs/references/api-reference/operations.mdx create mode 100644 docs/en/release-1_0_1/user_docs/references/api-reference/parameters.mdx create mode 100644 docs/en/release-1_0_1/user_docs/references/install-addons.mdx create mode 100644 docs/en/release-1_0_1/user_docs/references/install-kbcli.mdx create mode 100644 
docs/en/release-1_0_1/user_docs/references/install-minio.mdx create mode 100644 docs/en/release-1_0_1/user_docs/references/install-snapshot-controller.mdx create mode 100644 docs/en/release-1_0_1/user_docs/references/kubeblocks_options.mdx create mode 100644 docs/en/release-1_0_1/user_docs/references/kubernetes_and_operator_101.mdx create mode 100644 docs/en/release-1_0_1/user_docs/references/prepare-a-local-k8s-cluster.mdx create mode 100644 docs/en/release-1_0_1/user_docs/references/terminology.mdx create mode 100644 docs/en/release-1_0_1/user_docs/release_notes/_category_.yml create mode 100644 docs/en/release-1_0_1/user_docs/release_notes/release-09/090.mdx create mode 100644 docs/en/release-1_0_1/user_docs/release_notes/release-09/091.mdx create mode 100644 docs/en/release-1_0_1/user_docs/release_notes/release-09/092.mdx create mode 100644 docs/en/release-1_0_1/user_docs/release_notes/release-09/093.mdx create mode 100644 docs/en/release-1_0_1/user_docs/release_notes/release-09/094.mdx create mode 100644 docs/en/release-1_0_1/user_docs/release_notes/release-09/095.mdx create mode 100644 docs/en/release-1_0_1/user_docs/release_notes/release-09/_category_.yml create mode 100644 docs/en/release-1_0_1/user_docs/release_notes/release-10/100-cn.mdx create mode 100644 docs/en/release-1_0_1/user_docs/release_notes/release-10/100.mdx create mode 100644 docs/en/release-1_0_1/user_docs/release_notes/release-10/_category_.yml create mode 100644 docs/en/release-1_0_1/user_docs/troubleshooting/_category_.yml create mode 100644 docs/en/release-1_0_1/user_docs/troubleshooting/handle-a-cluster-exception.mdx create mode 100644 docs/en/release-1_0_1/user_docs/troubleshooting/known-issues.mdx create mode 100644 docs/en/release-1_0_1/user_docs/upgrade/_category_.yml create mode 100644 docs/en/release-1_0_1/user_docs/upgrade/upgrade-to-0_8.mdx create mode 100644 docs/en/release-1_0_1/user_docs/upgrade/upgrade-to-0_9_0.mdx create mode 100644 
docs/en/release-1_0_1/user_docs/upgrade/upgrade-to-v09-version.mdx diff --git a/docs/en/release-1_0_1/cli/_category_.yml b/docs/en/release-1_0_1/cli/_category_.yml new file mode 100644 index 00000000..dff53aca --- /dev/null +++ b/docs/en/release-1_0_1/cli/_category_.yml @@ -0,0 +1,5 @@ +position: 30 +label: Command Line +collapsible: true +collapsed: true +className: hide-children \ No newline at end of file diff --git a/docs/en/release-1_0_1/cli/cli.mdx b/docs/en/release-1_0_1/cli/cli.mdx new file mode 100644 index 00000000..54487f0e --- /dev/null +++ b/docs/en/release-1_0_1/cli/cli.mdx @@ -0,0 +1,203 @@ +--- +title: KubeBlocks CLI Overview +description: KubeBlocks CLI overview +sidebar_position: 1 +--- + +## [addon](kbcli_addon.md) + +Addon command. + +* [kbcli addon describe](kbcli_addon_describe.md) - Describe an addon specification. +* [kbcli addon disable](kbcli_addon_disable.md) - Disable an addon. +* [kbcli addon enable](kbcli_addon_enable.md) - Enable an addon. +* [kbcli addon index](kbcli_addon_index.md) - Manage custom addon indexes +* [kbcli addon install](kbcli_addon_install.md) - Install KubeBlocks addon +* [kbcli addon list](kbcli_addon_list.md) - List addons. +* [kbcli addon purge](kbcli_addon_purge.md) - Purge the sub-resources of specified addon and versions +* [kbcli addon search](kbcli_addon_search.md) - Search the addon from index +* [kbcli addon uninstall](kbcli_addon_uninstall.md) - Uninstall an existed addon +* [kbcli addon upgrade](kbcli_addon_upgrade.md) - Upgrade an existed addon to latest version or a specified version + + +## [backuprepo](kbcli_backuprepo.md) + +BackupRepo command. + +* [kbcli backuprepo create](kbcli_backuprepo_create.md) - Create a backup repository +* [kbcli backuprepo delete](kbcli_backuprepo_delete.md) - Delete a backup repository. +* [kbcli backuprepo describe](kbcli_backuprepo_describe.md) - Describe a backup repository. +* [kbcli backuprepo list](kbcli_backuprepo_list.md) - List Backup Repositories. 
+* [kbcli backuprepo list-storage-provider](kbcli_backuprepo_list-storage-provider.md) - List storage providers. +* [kbcli backuprepo update](kbcli_backuprepo_update.md) - Update a backup repository. + + +## [cluster](kbcli_cluster.md) + +Cluster command. + +* [kbcli cluster backup](kbcli_cluster_backup.md) - Create a backup for the cluster. +* [kbcli cluster cancel-ops](kbcli_cluster_cancel-ops.md) - Cancel the pending/creating/running OpsRequest which type is vscale or hscale. +* [kbcli cluster configure](kbcli_cluster_configure.md) - Configure parameters with the specified components in the cluster. +* [kbcli cluster connect](kbcli_cluster_connect.md) - Connect to a cluster or instance. +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - +* [kbcli cluster delete](kbcli_cluster_delete.md) - Delete clusters. +* [kbcli cluster delete-backup](kbcli_cluster_delete-backup.md) - Delete a backup. +* [kbcli cluster delete-ops](kbcli_cluster_delete-ops.md) - Delete an OpsRequest. +* [kbcli cluster describe](kbcli_cluster_describe.md) - Show details of a specific cluster. +* [kbcli cluster describe-backup](kbcli_cluster_describe-backup.md) - Describe a backup. +* [kbcli cluster describe-backup-policy](kbcli_cluster_describe-backup-policy.md) - Describe backup policy +* [kbcli cluster describe-config](kbcli_cluster_describe-config.md) - Show details of a specific reconfiguring. +* [kbcli cluster describe-ops](kbcli_cluster_describe-ops.md) - Show details of a specific OpsRequest. +* [kbcli cluster describe-restore](kbcli_cluster_describe-restore.md) - Describe a restore +* [kbcli cluster edit-backup-policy](kbcli_cluster_edit-backup-policy.md) - Edit backup policy +* [kbcli cluster edit-config](kbcli_cluster_edit-config.md) - Edit the config file of the component. +* [kbcli cluster explain-config](kbcli_cluster_explain-config.md) - List the constraint for supported configuration params. 
+* [kbcli cluster expose](kbcli_cluster_expose.md) - Expose a cluster with a new endpoint, the new endpoint can be found by executing 'kbcli cluster describe NAME'. +* [kbcli cluster label](kbcli_cluster_label.md) - Update the labels on cluster +* [kbcli cluster list](kbcli_cluster_list.md) - List clusters. +* [kbcli cluster list-backup-policies](kbcli_cluster_list-backup-policies.md) - List backups policies. +* [kbcli cluster list-backups](kbcli_cluster_list-backups.md) - List backups. +* [kbcli cluster list-components](kbcli_cluster_list-components.md) - List cluster components. +* [kbcli cluster list-events](kbcli_cluster_list-events.md) - List cluster events. +* [kbcli cluster list-instances](kbcli_cluster_list-instances.md) - List cluster instances. +* [kbcli cluster list-logs](kbcli_cluster_list-logs.md) - List supported log files in cluster. +* [kbcli cluster list-ops](kbcli_cluster_list-ops.md) - List all opsRequests. +* [kbcli cluster list-restores](kbcli_cluster_list-restores.md) - List restores. +* [kbcli cluster logs](kbcli_cluster_logs.md) - Access cluster log file. +* [kbcli cluster promote](kbcli_cluster_promote.md) - Promote a non-primary or non-leader instance as the new primary or leader of the cluster +* [kbcli cluster rebuild-instance](kbcli_cluster_rebuild-instance.md) - Rebuild the specified instances in the cluster. +* [kbcli cluster register](kbcli_cluster_register.md) - Pull the cluster chart to the local cache and register the type to 'create' sub-command +* [kbcli cluster restart](kbcli_cluster_restart.md) - Restart the specified components in the cluster. +* [kbcli cluster restore](kbcli_cluster_restore.md) - Restore a new cluster from backup. +* [kbcli cluster scale-in](kbcli_cluster_scale-in.md) - scale in replicas of the specified components in the cluster. +* [kbcli cluster scale-out](kbcli_cluster_scale-out.md) - scale out replicas of the specified components in the cluster. 
+* [kbcli cluster start](kbcli_cluster_start.md) - Start the cluster if cluster is stopped. +* [kbcli cluster stop](kbcli_cluster_stop.md) - Stop the cluster and release all the pods of the cluster. +* [kbcli cluster update](kbcli_cluster_update.md) - Update the cluster settings, such as enable or disable monitor or log. +* [kbcli cluster upgrade](kbcli_cluster_upgrade.md) - Upgrade the service version(only support to upgrade minor version). +* [kbcli cluster upgrade-to-v1](kbcli_cluster_upgrade-to-v1.md) - upgrade cluster to v1 api version. +* [kbcli cluster volume-expand](kbcli_cluster_volume-expand.md) - Expand volume with the specified components and volumeClaimTemplates in the cluster. +* [kbcli cluster vscale](kbcli_cluster_vscale.md) - Vertically scale the specified components in the cluster. + + +## [clusterdefinition](kbcli_clusterdefinition.md) + +ClusterDefinition command. + +* [kbcli clusterdefinition describe](kbcli_clusterdefinition_describe.md) - Describe ClusterDefinition. +* [kbcli clusterdefinition list](kbcli_clusterdefinition_list.md) - List ClusterDefinitions. + + +## [componentdefinition](kbcli_componentdefinition.md) + +ComponentDefinition command. + +* [kbcli componentdefinition describe](kbcli_componentdefinition_describe.md) - Describe ComponentDefinition. +* [kbcli componentdefinition list](kbcli_componentdefinition_list.md) - List ComponentDefinition. + + +## [componentversion](kbcli_componentversion.md) + +ComponentVersions command. + +* [kbcli componentversion describe](kbcli_componentversion_describe.md) - Describe ComponentVersion. +* [kbcli componentversion list](kbcli_componentversion_list.md) - List ComponentVersion. + + +## [dataprotection](kbcli_dataprotection.md) + +Data protection command. + +* [kbcli dataprotection backup](kbcli_dataprotection_backup.md) - Create a backup for the cluster. +* [kbcli dataprotection delete-backup](kbcli_dataprotection_delete-backup.md) - Delete a backup. 
+* [kbcli dataprotection describe-backup](kbcli_dataprotection_describe-backup.md) - Describe a backup +* [kbcli dataprotection describe-backup-policy](kbcli_dataprotection_describe-backup-policy.md) - Describe a backup policy +* [kbcli dataprotection describe-restore](kbcli_dataprotection_describe-restore.md) - Describe a restore +* [kbcli dataprotection edit-backup-policy](kbcli_dataprotection_edit-backup-policy.md) - Edit backup policy +* [kbcli dataprotection list-action-sets](kbcli_dataprotection_list-action-sets.md) - List actionsets +* [kbcli dataprotection list-backup-policies](kbcli_dataprotection_list-backup-policies.md) - List backup policies +* [kbcli dataprotection list-backup-policy-templates](kbcli_dataprotection_list-backup-policy-templates.md) - List backup policy templates +* [kbcli dataprotection list-backups](kbcli_dataprotection_list-backups.md) - List backups. +* [kbcli dataprotection list-restores](kbcli_dataprotection_list-restores.md) - List restores. +* [kbcli dataprotection restore](kbcli_dataprotection_restore.md) - Restore a new cluster from backup + + +## [kubeblocks](kbcli_kubeblocks.md) + +KubeBlocks operation commands. + +* [kbcli kubeblocks compare](kbcli_kubeblocks_compare.md) - List the changes between two different version KubeBlocks. +* [kbcli kubeblocks config](kbcli_kubeblocks_config.md) - KubeBlocks config. +* [kbcli kubeblocks describe-config](kbcli_kubeblocks_describe-config.md) - Describe KubeBlocks config. +* [kbcli kubeblocks install](kbcli_kubeblocks_install.md) - Install KubeBlocks. +* [kbcli kubeblocks list-versions](kbcli_kubeblocks_list-versions.md) - List KubeBlocks versions. +* [kbcli kubeblocks preflight](kbcli_kubeblocks_preflight.md) - Run and retrieve preflight checks for KubeBlocks. +* [kbcli kubeblocks status](kbcli_kubeblocks_status.md) - Show list of resource KubeBlocks uses or owns. +* [kbcli kubeblocks uninstall](kbcli_kubeblocks_uninstall.md) - Uninstall KubeBlocks. 
+* [kbcli kubeblocks upgrade](kbcli_kubeblocks_upgrade.md) - Upgrade KubeBlocks. + + +## [ops-definition](kbcli_ops-definition.md) + +ops-definitions command. + +* [kbcli ops-definition describe](kbcli_ops-definition_describe.md) - Describe OpsDefinition. +* [kbcli ops-definition list](kbcli_ops-definition_list.md) - List OpsDefinition. + + +## [options](kbcli_options.md) + +Print the list of flags inherited by all commands. + + + +## [playground](kbcli_playground.md) + +Bootstrap or destroy a playground KubeBlocks in local host or cloud. + +* [kbcli playground destroy](kbcli_playground_destroy.md) - Destroy the playground KubeBlocks and kubernetes cluster. +* [kbcli playground init](kbcli_playground_init.md) - Bootstrap a kubernetes cluster and install KubeBlocks for playground. + + +## [plugin](kbcli_plugin.md) + +Provides utilities for interacting with plugins. + + Plugins provide extended functionality that is not part of the major command-line distribution. + +* [kbcli plugin describe](kbcli_plugin_describe.md) - Describe a plugin +* [kbcli plugin index](kbcli_plugin_index.md) - Manage custom plugin indexes +* [kbcli plugin install](kbcli_plugin_install.md) - Install kbcli or kubectl plugins +* [kbcli plugin list](kbcli_plugin_list.md) - List all visible plugin executables on a user's PATH +* [kbcli plugin search](kbcli_plugin_search.md) - Search kbcli or kubectl plugins +* [kbcli plugin uninstall](kbcli_plugin_uninstall.md) - Uninstall kbcli or kubectl plugins +* [kbcli plugin upgrade](kbcli_plugin_upgrade.md) - Upgrade kbcli or kubectl plugins + + +## [report](kbcli_report.md) + +Report kubeblocks or cluster info. + +* [kbcli report cluster](kbcli_report_cluster.md) - Report Cluster information +* [kbcli report kubeblocks](kbcli_report_kubeblocks.md) - Report KubeBlocks information, including deployments, events, logs, etc. + + +## [trace](kbcli_trace.md) + +trace management command + +* [kbcli trace create](kbcli_trace_create.md) - create a trace. 
+* [kbcli trace delete](kbcli_trace_delete.md) - Delete a trace. +* [kbcli trace list](kbcli_trace_list.md) - list all traces. +* [kbcli trace update](kbcli_trace_update.md) - update a trace. +* [kbcli trace watch](kbcli_trace_watch.md) - watch a trace. + + +## [version](kbcli_version.md) + +Print the version information, include kubernetes, KubeBlocks and kbcli version. + + + diff --git a/docs/en/release-1_0_1/cli/kbcli.mdx b/docs/en/release-1_0_1/cli/kbcli.mdx new file mode 100644 index 00000000..de027710 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli.mdx @@ -0,0 +1,74 @@ +--- +title: kbcli +--- + +KubeBlocks CLI. + +### Synopsis + +``` + +============================================= + __ __ _______ ______ __ ______ +| \ / \ \ / \| \ | \ +| ▓▓ / ▓▓ ▓▓▓▓▓▓▓\ ▓▓▓▓▓▓\ ▓▓ \▓▓▓▓▓▓ +| ▓▓/ ▓▓| ▓▓__/ ▓▓ ▓▓ \▓▓ ▓▓ | ▓▓ +| ▓▓ ▓▓ | ▓▓ ▓▓ ▓▓ | ▓▓ | ▓▓ +| ▓▓▓▓▓\ | ▓▓▓▓▓▓▓\ ▓▓ __| ▓▓ | ▓▓ +| ▓▓ \▓▓\| ▓▓__/ ▓▓ ▓▓__/ \ ▓▓_____ _| ▓▓_ +| ▓▓ \▓▓\ ▓▓ ▓▓\▓▓ ▓▓ ▓▓ \ ▓▓ \ + \▓▓ \▓▓\▓▓▓▓▓▓▓ \▓▓▓▓▓▓ \▓▓▓▓▓▓▓▓\▓▓▓▓▓▓ + +============================================= +A Command Line Interface for KubeBlocks +``` + +``` +kbcli [flags] +``` + +### Options + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + -h, --help help for kbcli + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. +* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command. +* [kbcli cluster](kbcli_cluster.md) - Cluster command. +* [kbcli clusterdefinition](kbcli_clusterdefinition.md) - ClusterDefinition command. +* [kbcli componentdefinition](kbcli_componentdefinition.md) - ComponentDefinition command. +* [kbcli componentversion](kbcli_componentversion.md) - ComponentVersions command. 
+* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. +* [kbcli ops-definition](kbcli_ops-definition.md) - ops-definitions command. +* [kbcli options](kbcli_options.md) - Print the list of flags inherited by all commands. +* [kbcli playground](kbcli_playground.md) - Bootstrap or destroy a playground KubeBlocks in local host or cloud. +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. +* [kbcli report](kbcli_report.md) - Report kubeblocks or cluster info. +* [kbcli trace](kbcli_trace.md) - trace management command +* [kbcli version](kbcli_version.md) - Print the version information, include kubernetes, KubeBlocks and kbcli version. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_addon.mdx b/docs/en/release-1_0_1/cli/kbcli_addon.mdx new file mode 100644 index 00000000..4948d30e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon.mdx @@ -0,0 +1,52 @@ +--- +title: kbcli addon +--- + +Addon command. + +### Options + +``` + -h, --help help for addon +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli addon describe](kbcli_addon_describe.md) - Describe an addon specification. +* [kbcli addon disable](kbcli_addon_disable.md) - Disable an addon. +* [kbcli addon enable](kbcli_addon_enable.md) - Enable an addon. +* [kbcli addon index](kbcli_addon_index.md) - Manage custom addon indexes +* [kbcli addon install](kbcli_addon_install.md) - Install KubeBlocks addon +* [kbcli addon list](kbcli_addon_list.md) - List addons. 
+* [kbcli addon purge](kbcli_addon_purge.md) - Purge the sub-resources of specified addon and versions +* [kbcli addon search](kbcli_addon_search.md) - Search the addon from index +* [kbcli addon uninstall](kbcli_addon_uninstall.md) - Uninstall an existed addon +* [kbcli addon upgrade](kbcli_addon_upgrade.md) - Upgrade an existed addon to latest version or a specified version + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_describe.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_describe.mdx new file mode 100644 index 00000000..c3b1991e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_describe.mdx @@ -0,0 +1,46 @@ +--- +title: kbcli addon describe +--- + +Describe an addon specification. + +``` +kbcli addon describe ADDON_NAME [flags] +``` + +### Options + +``` + -h, --help help for describe +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_disable.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_disable.mdx new file mode 100644 index 00000000..d90978e1 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_disable.mdx @@ -0,0 +1,63 @@ +--- +title: kbcli addon disable +--- + +Disable an addon. + +``` +kbcli addon disable ADDON_NAME [flags] +``` + +### Examples + +``` + # Disable "prometheus" addon + kbcli addon disable prometheus + + # Disable addons in batch + kbcli addon disable prometheus csi-s3 +``` + +### Options + +``` + --allow-missing-template-keys If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. (default true) + --auto-approve Skip interactive approval before disabling addon + --dry-run string[="unchanged"] Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit server-side request without persisting the resource. 
(default "none") + --edit Edit the API resource + -h, --help help for disable + -o, --output string Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file). + --show-managed-fields If true, keep the managedFields when printing objects in JSON or YAML format. + --template string Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_enable.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_enable.mdx new file mode 100644 index 00000000..3bfcae27 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_enable.mdx @@ -0,0 +1,94 @@ +--- +title: kbcli addon enable +--- + +Enable an addon. + +``` +kbcli addon enable ADDON_NAME [flags] +``` + +### Examples + +``` + # Enabled "prometheus" addon + kbcli addon enable prometheus + + # Enabled "prometheus" addon with custom resources settings + kbcli addon enable prometheus --memory 512Mi/4Gi --storage 8Gi --replicas 2 + + # Enabled "prometheus" addon and its extra alertmanager component with custom resources settings + kbcli addon enable prometheus --memory 512Mi/4Gi --storage 8Gi --replicas 2 \ + --memory alertmanager:16Mi/256Mi --storage alertmanager:1Gi --replicas alertmanager:2 + + # Enabled "prometheus" addon with tolerations + kbcli addon enable prometheus \ + --tolerations '[{"key":"taintkey","operator":"Equal","effect":"NoSchedule","value":"true"}]' \ + --tolerations 'alertmanager:[{"key":"taintkey","operator":"Equal","effect":"NoSchedule","value":"true"}]' + + # Enabled "prometheus" addon with helm like custom settings + kbcli addon enable prometheus --set prometheus.alertmanager.image.tag=v0.24.0 + + # Force enabled "csi-s3" addon + kbcli addon enable csi-s3 --force + + # Enable addons in batch + kbcli addon enable prometheus csi-s3 +``` + +### Options + +``` + --allow-missing-template-keys If true, ignore any errors in 
templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. (default true) + --cpu stringArray Sets addon CPU resource values (--cpu [extraName:]/) (can specify multiple if has extra items)) + --dry-run string[="unchanged"] Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit server-side request without persisting the resource. (default "none") + --edit Edit the API resource + --force ignoring the installable restrictions and forcefully enabling. + -h, --help help for enable + --memory stringArray Sets addon memory resource values (--memory [extraName:]/) (can specify multiple if has extra items)) + -o, --output string Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file). + --replicas stringArray Sets addon component replica count (--replicas [extraName:]) (can specify multiple if has extra items)) + --set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2), it's only being processed if addon's type is helm. + --show-managed-fields If true, keep the managedFields when printing objects in JSON or YAML format. + --storage stringArray Sets addon storage size (--storage [extraName:]) (can specify multiple if has extra items)). + Additional notes: + 1. Specify '0' value will remove storage values settings and explicitly disable 'persistentVolumeEnabled' attribute. + 2. For Helm type Addon, that resizing storage will fail if modified value is a storage request size + that belongs to StatefulSet's volume claim template, to resolve 'Failed' Addon status possible action is disable and + re-enable the addon (More info on how-to resize a PVC: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources). 
+ + --storage-class stringArray Sets addon storage class name (--storage-class [extraName:]) (can specify multiple if has extra items)) + --template string Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. + --tolerations stringArray Sets addon pod tolerations (--tolerations [extraName:]) (can specify multiple if has extra items)) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_index.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_index.mdx new file mode 100644 index 00000000..c8a6b459 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_index.mdx @@ -0,0 +1,50 @@ +--- +title: kbcli addon index +--- + +Manage custom addon indexes + +### Synopsis + +Manage which repositories are used to discover and install addon from. + +### Options + +``` + -h, --help help for index +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. +* [kbcli addon index add](kbcli_addon_index_add.md) - Add a new addon index +* [kbcli addon index delete](kbcli_addon_index_delete.md) - Delete an addon index +* [kbcli addon index list](kbcli_addon_index_list.md) - List addon indexes +* [kbcli addon index update](kbcli_addon_index_update.md) - update the specified index(es) + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_index_add.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_index_add.mdx new file mode 100644 index 00000000..50eb3a05 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_index_add.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli addon index add +--- + +Add a new addon index + +### Synopsis + +Configure a new index to install KubeBlocks addon from. 
+ +``` +kbcli addon index add [flags] +``` + +### Examples + +``` +kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git +``` + +### Options + +``` + -h, --help help for add +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon index](kbcli_addon_index.md) - Manage custom addon indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_index_delete.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_index_delete.mdx new file mode 100644 index 00000000..80e34e0b --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_index_delete.mdx @@ -0,0 +1,50 @@ +--- +title: kbcli addon index delete +--- + +Delete an addon index + +### Synopsis + +Delete a configured addon index. + +``` +kbcli addon index delete [flags] +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon index](kbcli_addon_index.md) - Manage custom addon indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_index_list.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_index_list.mdx new file mode 100644 index 00000000..04ac17ea --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_index_list.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli addon index list +--- + +List addon indexes + +### Synopsis + +Print a list of addon indexes. + +This command prints a list of addon indexes. It shows the name and the remote URL for +each addon index in table format. + +``` +kbcli addon index list [flags] +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon index](kbcli_addon_index.md) - Manage custom addon indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_index_update.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_index_update.mdx new file mode 100644 index 00000000..9faf021d --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_index_update.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli addon index update +--- + +update the specified index(es) + +### Synopsis + +Update existed index repository from index origin URL + +``` +kbcli addon index update [flags] +``` + +### Examples + +``` +kbcli addon index update KubeBlocks +``` + +### Options + +``` + --all Upgrade all addon index + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 
1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon index](kbcli_addon_index.md) - Manage custom addon indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_install.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_install.mdx new file mode 100644 index 00000000..727dc89c --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_install.mdx @@ -0,0 +1,74 @@ +--- +title: kbcli addon install +--- + +Install KubeBlocks addon + +``` +kbcli addon install [flags] +``` + +### Examples + +``` + # install an addon from default index + kbcli addon install apecloud-mysql + + # install an addon from default index and skip KubeBlocks version compatibility check + kbcli addon install apecloud-mysql --force + + # install an addon from a specified index + kbcli addon install apecloud-mysql --index my-index + + # install an addon with a specified version default index + kbcli addon install apecloud-mysql --version 0.7.0 + + # install an addon with a specified version and cluster chart of different version. + kbcli addon install apecloud-mysql --version 0.7.0 --cluster-chart-version 0.7.1 + + # install an addon with a specified version and local path. 
+ kbcli addon install apecloud-mysql --version 0.7.0 --path /path/to/local/chart +``` + +### Options + +``` + --cluster-chart-repo string specify the repo of cluster chart, use the url of 'kubeblocks-addons' by default (default "https://jihulab.com/api/v4/projects/150246/packages/helm/stable") + --cluster-chart-version string specify the cluster chart version, use the same version as the addon by default + --force force install the addon and ignore the version check + -h, --help help for install + --index string specify the addon index, use 'kubeblocks' by default (default "kubeblocks") + --path string specify the local path contains addon CRs and needs to be specified when operating offline + --version string specify the addon version to install, run 'kbcli addon search ' to get the available versions +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_list.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_list.mdx new file mode 100644 index 00000000..2404752a --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_list.mdx @@ -0,0 +1,51 @@ +--- +title: kbcli addon list +--- + +List addons. + +``` +kbcli addon list [flags] +``` + +### Options + +``` + --engines List engine addons only + -h, --help help for list + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) + --status stringArray Filter addons by status +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_purge.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_purge.mdx new file mode 100644 index 00000000..0d61ee16 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_purge.mdx @@ -0,0 +1,63 @@ +--- +title: kbcli addon purge +--- + +Purge the sub-resources of specified addon and versions + +``` +kbcli addon purge [flags] +``` + +### Examples + +``` + # Purge specific versions of redis addon resources + kbcli addon purge redis --versions=0.9.1,0.9.2 + + # Purge all unused and outdated resources of redis addon + kbcli addon purge redis --all + + # Print the resources that would be purged, and no resource is actually purged + kbcli addon purge redis --dry-run +``` + +### Options + +``` + --all If set to true, all resources will be purged, including those that are unused and not the newest version. + --auto-approve Skip interactive approval before deleting + --dry-run If set to true, only print the resources that would be purged, and no resource is actually purged. + -h, --help help for purge + --versions strings Specify the versions of resources to purge. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_search.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_search.mdx new file mode 100644 index 00000000..10995adf --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_search.mdx @@ -0,0 +1,60 @@ +--- +title: kbcli addon search +--- + +Search the addon from index + +``` +kbcli addon search [ADDON_NAME] [flags] +``` + +### Examples + +``` + # search the addons of all index + kbcli addon search + + # search the addons from a specified local path + kbcli addon search --path /path/to/local/chart + + # search different versions and indexes of an addon + kbcli addon search apecloud-mysql +``` + +### Options + +``` + -h, --help help for search + --path string the local directory contains addon CRs +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli addon](kbcli_addon.md)	 - Addon command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_uninstall.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_uninstall.mdx
new file mode 100644
index 00000000..b2e169c3
--- /dev/null
+++ b/docs/en/release-1_0_1/cli/kbcli_addon_uninstall.mdx
@@ -0,0 +1,57 @@
+---
+title: kbcli addon uninstall
+---
+
+Uninstall an existing addon
+
+```
+kbcli addon uninstall [flags]
+```
+
+### Examples
+
+```
+  # uninstall an addon
+  kbcli addon uninstall apecloud-mysql
+
+  # uninstall more than one addon
+  kbcli addon uninstall apecloud-mysql postgresql
+```
+
+### Options
+
+```
+      --auto-approve   Skip interactive approval before uninstalling addon
+  -h, --help           help for uninstall
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_addon_upgrade.mdx b/docs/en/release-1_0_1/cli/kbcli_addon_upgrade.mdx new file mode 100644 index 00000000..fb4202e2 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_addon_upgrade.mdx @@ -0,0 +1,79 @@ +--- +title: kbcli addon upgrade +--- + +Upgrade an existing addon to the latest version or a specified version + +``` +kbcli addon upgrade [flags] +``` + +### Examples + +``` + # upgrade an addon from default index to latest version + kbcli addon upgrade apecloud-mysql + + # upgrade an addon from default index to latest version and skip KubeBlocks version compatibility check + kbcli addon upgrade apecloud-mysql --force + + # upgrade an addon to latest version from a specified index + kbcli addon upgrade apecloud-mysql --index my-index + + # upgrade an addon with a specified version from the default index + kbcli addon upgrade apecloud-mysql --version 0.7.0 + + # upgrade an addon with a specified version, default index and a different version of cluster chart + kbcli addon upgrade apecloud-mysql --version 0.7.0 --cluster-chart-version 0.7.1 + + # non-inplace upgrade an addon with a specified version + kbcli addon upgrade apecloud-mysql --inplace=false --version 0.7.0 + + # non-inplace upgrade an addon with a specified addon name + kbcli addon upgrade apecloud-mysql --inplace=false --name apecloud-mysql-0.7.0 +``` + +### Options + +``` + --cluster-chart-repo string specify the repo of cluster chart, use the url of 'kubeblocks-addons' by default (default "https://jihulab.com/api/v4/projects/150246/packages/helm/stable") + --cluster-chart-version string specify the cluster chart version, use the same version as the addon by default + --force force upgrade the addon and ignore the version check + -h, --help help for upgrade + --index string specify the addon index, use 'kubeblocks' by default (default "kubeblocks") + --inplace when inplace is false, it will retain the existing addon and reinstall the new version of the addon, 
otherwise the upgrade will be in-place. The default is true. (default true) + --name string name is the new version addon name need to set by user when inplace is false, it also will be used as resourceNamePrefix of an addon with multiple version. + --path string specify the local path contains addon CRs and needs to be specified when operating offline + --version string specify the addon version +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_backuprepo.mdx b/docs/en/release-1_0_1/cli/kbcli_backuprepo.mdx new file mode 100644 index 00000000..b58385b6 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_backuprepo.mdx @@ -0,0 +1,48 @@ +--- +title: kbcli backuprepo +--- + +BackupRepo command. + +### Options + +``` + -h, --help help for backuprepo +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli backuprepo create](kbcli_backuprepo_create.md) - Create a backup repository +* [kbcli backuprepo delete](kbcli_backuprepo_delete.md) - Delete a backup repository. +* [kbcli backuprepo describe](kbcli_backuprepo_describe.md) - Describe a backup repository. +* [kbcli backuprepo list](kbcli_backuprepo_list.md) - List Backup Repositories. +* [kbcli backuprepo list-storage-provider](kbcli_backuprepo_list-storage-provider.md) - List storage providers. +* [kbcli backuprepo update](kbcli_backuprepo_update.md) - Update a backup repository. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_backuprepo_create.mdx b/docs/en/release-1_0_1/cli/kbcli_backuprepo_create.mdx new file mode 100644 index 00000000..7856317c --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_backuprepo_create.mdx @@ -0,0 +1,90 @@ +--- +title: kbcli backuprepo create +--- + +Create a backup repository + +``` +kbcli backuprepo create [NAME] [flags] +``` + +### Examples + +``` + # Create a default backup repository using S3 as the backend + kbcli backuprepo create \ + --provider s3 \ + --region us-west-1 \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key \ + --default + + # Create a non-default backup repository with a specified name + kbcli backuprepo create my-backup-repo \ + --provider s3 \ + --region us-west-1 \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key + + # Create a backup repository with a sub-path to isolate different repositories + kbcli backuprepo create my-backup-repo \ + --provider s3 \ + --region us-west-1 \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key \ + --path-prefix dev/team1 + + # Create a backup repository with a FTP backend + kbcli backuprepo create \ + --provider ftp \ + --ftp-host= \ + --ftp-port=21 \ + --ftp-user= \ + --ftp-password= +``` + +### Options + +``` + --access-method string Specify the access method for the backup repository, "Tool" is preferred if not specified. options: ["Mount" "Tool"] + --default Specify whether to set the created backup repository as default + -h, --help help for create + --path-prefix string Specify the prefix of the path for storing backup files. + --provider string Specify storage provider + --pv-reclaim-policy string Specify the reclaim policy for PVs created by this backup repository, the value can be "Retain" or "Delete". This option only takes effect when --access-method="Mount". (default "Retain") + --volume-capacity string Specify the capacity of the new created PVC. 
This option only takes effect when --access-method="Mount". (default "100Gi") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command. 
+ +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_backuprepo_delete.mdx b/docs/en/release-1_0_1/cli/kbcli_backuprepo_delete.mdx new file mode 100644 index 00000000..3af401d4 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_backuprepo_delete.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli backuprepo delete +--- + +Delete a backup repository. + +``` +kbcli backuprepo delete [flags] +``` + +### Examples + +``` + # Delete a backuprepo + kbcli backuprepo delete my-backuprepo +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_backuprepo_describe.mdx b/docs/en/release-1_0_1/cli/kbcli_backuprepo_describe.mdx new file mode 100644 index 00000000..5a176b05 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_backuprepo_describe.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli backuprepo describe +--- + +Describe a backup repository. + +``` +kbcli backuprepo describe [flags] +``` + +### Examples + +``` + # Describe a backuprepo + kbcli backuprepo describe my-backuprepo +``` + +### Options + +``` + -h, --help help for describe +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_backuprepo_list-storage-provider.mdx b/docs/en/release-1_0_1/cli/kbcli_backuprepo_list-storage-provider.mdx new file mode 100644 index 00000000..af3e70e2 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_backuprepo_list-storage-provider.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli backuprepo list-storage-provider +--- + +List storage providers. 
+ +``` +kbcli backuprepo list-storage-provider [flags] +``` + +### Examples + +``` + # List all storage provider + kbcli backuprepo list-sp +``` + +### Options + +``` + -h, --help help for list-storage-provider + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. 
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_backuprepo_list.mdx b/docs/en/release-1_0_1/cli/kbcli_backuprepo_list.mdx new file mode 100644 index 00000000..1367467e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_backuprepo_list.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli backuprepo list +--- + +List Backup Repositories. + +``` +kbcli backuprepo list [flags] +``` + +### Examples + +``` + # List all backup repositories + kbcli backuprepo list +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_backuprepo_update.mdx b/docs/en/release-1_0_1/cli/kbcli_backuprepo_update.mdx new file mode 100644 index 00000000..9d68291c --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_backuprepo_update.mdx @@ -0,0 +1,60 @@ +--- +title: kbcli backuprepo update +--- + +Update a backup repository. + +``` +kbcli backuprepo update BACKUP_REPO_NAME [flags] +``` + +### Examples + +``` + # Update the credential of a S3-based backuprepo + kbcli backuprepo update my-backuprepo --access-key-id= --secret-access-key= + + # Set the backuprepo as default + kbcli backuprepo update my-backuprepo --default + + # Unset the default backuprepo + kbcli backuprepo update my-backuprepo --default=false +``` + +### Options + +``` + --default Specify whether to set the created backup repo as default + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster.mdx new file mode 100644 index 00000000..881191d8 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster.mdx @@ -0,0 +1,86 @@ +--- +title: kbcli cluster +--- + +Cluster command. + +### Options + +``` + -h, --help help for cluster +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli cluster backup](kbcli_cluster_backup.md) - Create a backup for the cluster. +* [kbcli cluster cancel-ops](kbcli_cluster_cancel-ops.md) - Cancel the pending/creating/running OpsRequest which type is vscale or hscale. +* [kbcli cluster configure](kbcli_cluster_configure.md) - Configure parameters with the specified components in the cluster. +* [kbcli cluster connect](kbcli_cluster_connect.md) - Connect to a cluster or instance. +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. 
+* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - +* [kbcli cluster delete](kbcli_cluster_delete.md) - Delete clusters. +* [kbcli cluster delete-backup](kbcli_cluster_delete-backup.md) - Delete a backup. +* [kbcli cluster delete-ops](kbcli_cluster_delete-ops.md) - Delete an OpsRequest. +* [kbcli cluster describe](kbcli_cluster_describe.md) - Show details of a specific cluster. +* [kbcli cluster describe-backup](kbcli_cluster_describe-backup.md) - Describe a backup. +* [kbcli cluster describe-backup-policy](kbcli_cluster_describe-backup-policy.md) - Describe backup policy +* [kbcli cluster describe-config](kbcli_cluster_describe-config.md) - Show details of a specific reconfiguring. +* [kbcli cluster describe-ops](kbcli_cluster_describe-ops.md) - Show details of a specific OpsRequest. +* [kbcli cluster describe-restore](kbcli_cluster_describe-restore.md) - Describe a restore +* [kbcli cluster edit-backup-policy](kbcli_cluster_edit-backup-policy.md) - Edit backup policy +* [kbcli cluster edit-config](kbcli_cluster_edit-config.md) - Edit the config file of the component. +* [kbcli cluster explain-config](kbcli_cluster_explain-config.md) - List the constraint for supported configuration params. +* [kbcli cluster expose](kbcli_cluster_expose.md) - Expose a cluster with a new endpoint, the new endpoint can be found by executing 'kbcli cluster describe NAME'. +* [kbcli cluster label](kbcli_cluster_label.md) - Update the labels on cluster +* [kbcli cluster list](kbcli_cluster_list.md) - List clusters. +* [kbcli cluster list-backup-policies](kbcli_cluster_list-backup-policies.md) - List backups policies. +* [kbcli cluster list-backups](kbcli_cluster_list-backups.md) - List backups. +* [kbcli cluster list-components](kbcli_cluster_list-components.md) - List cluster components. +* [kbcli cluster list-events](kbcli_cluster_list-events.md) - List cluster events. +* [kbcli cluster list-instances](kbcli_cluster_list-instances.md) - List cluster instances. 
+* [kbcli cluster list-logs](kbcli_cluster_list-logs.md) - List supported log files in cluster. +* [kbcli cluster list-ops](kbcli_cluster_list-ops.md) - List all opsRequests. +* [kbcli cluster list-restores](kbcli_cluster_list-restores.md) - List restores. +* [kbcli cluster logs](kbcli_cluster_logs.md) - Access cluster log file. +* [kbcli cluster promote](kbcli_cluster_promote.md) - Promote a non-primary or non-leader instance as the new primary or leader of the cluster +* [kbcli cluster rebuild-instance](kbcli_cluster_rebuild-instance.md) - Rebuild the specified instances in the cluster. +* [kbcli cluster register](kbcli_cluster_register.md) - Pull the cluster chart to the local cache and register the type to 'create' sub-command +* [kbcli cluster restart](kbcli_cluster_restart.md) - Restart the specified components in the cluster. +* [kbcli cluster restore](kbcli_cluster_restore.md) - Restore a new cluster from backup. +* [kbcli cluster scale-in](kbcli_cluster_scale-in.md) - scale in replicas of the specified components in the cluster. +* [kbcli cluster scale-out](kbcli_cluster_scale-out.md) - scale out replicas of the specified components in the cluster. +* [kbcli cluster start](kbcli_cluster_start.md) - Start the cluster if cluster is stopped. +* [kbcli cluster stop](kbcli_cluster_stop.md) - Stop the cluster and release all the pods of the cluster. +* [kbcli cluster update](kbcli_cluster_update.md) - Update the cluster settings, such as enable or disable monitor or log. +* [kbcli cluster upgrade](kbcli_cluster_upgrade.md) - Upgrade the service version(only support to upgrade minor version). +* [kbcli cluster upgrade-to-v1](kbcli_cluster_upgrade-to-v1.md) - upgrade cluster to v1 api version. +* [kbcli cluster volume-expand](kbcli_cluster_volume-expand.md) - Expand volume with the specified components and volumeClaimTemplates in the cluster. +* [kbcli cluster vscale](kbcli_cluster_vscale.md) - Vertically scale the specified components in the cluster. 
+ +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_backup.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_backup.mdx new file mode 100644 index 00000000..d17c45b1 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_backup.mdx @@ -0,0 +1,71 @@ +--- +title: kbcli cluster backup +--- + +Create a backup for the cluster. + +``` +kbcli cluster backup NAME [flags] +``` + +### Examples + +``` + # Create a backup for the cluster, use the default backup policy and volume snapshot backup method + kbcli cluster backup mycluster + + # create a backup with a specified method, run "kbcli cluster desc-backup-policy mycluster" to show supported backup methods + kbcli cluster backup mycluster --method volume-snapshot + + # create a backup with specified backup policy, run "kbcli cluster list-backup-policies mycluster" to show the cluster supported backup policies + kbcli cluster backup mycluster --method volume-snapshot --policy + + # create a backup from a parent backup + kbcli cluster backup mycluster --parent-backup parent-backup-name +``` + +### Options + +``` + --deletion-policy string Deletion policy for backup, determine whether the backup content in backup repo will be deleted after the backup is deleted, supported values: [Delete, Retain] (default "Delete") + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. 
(default "none") + --edit Edit the API resource before creating + -h, --help help for backup + --method string Backup methods are defined in backup policy (required), if only one backup method in backup policy, use it as default backup method, if multiple backup methods in backup policy, use method which volume snapshot is true as default backup method + --name string Backup name + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --parent-backup string Parent backup name, used for incremental backup + --policy string Backup policy name, if not specified, use the cluster default backup policy + --retention-period string Retention period for backup, supported values: [1y, 1mo, 1d, 1h, 1m] or combine them [1y1mo1d1h1m], if not specified, the backup will not be automatically deleted, you need to manually delete it. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_cancel-ops.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_cancel-ops.mdx new file mode 100644 index 00000000..68439eca --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_cancel-ops.mdx @@ -0,0 +1,54 @@ +--- +title: kbcli cluster cancel-ops +--- + +Cancel the pending/creating/running OpsRequest which type is vscale or hscale. + +``` +kbcli cluster cancel-ops NAME [flags] +``` + +### Examples + +``` + # cancel the opsRequest which is not completed. + kbcli cluster cancel-ops +``` + +### Options + +``` + --auto-approve Skip interactive approval before cancel the opsRequest + -h, --help help for cancel-ops +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_configure.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_configure.mdx new file mode 100644 index 00000000..0f39e450 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_configure.mdx @@ -0,0 +1,71 @@ +--- +title: kbcli cluster configure +--- + +Configure parameters with the specified components in the cluster. 
+ +``` +kbcli cluster configure NAME --set key=value[,key=value] [--components=component1-name,component2-name] [--config-spec=config-spec-name] [--config-file=config-file] [flags] +``` + +### Examples + +``` + # update component params + kbcli cluster configure mycluster --components=mysql --config-spec=mysql-3node-tpl --config-file=my.cnf --set=max_connections=1000,general_log=OFF + + # if only one component, and one config spec, and one config file, simplify the searching process of configure. e.g: + # update mysql max_connections, cluster name is mycluster + kbcli cluster configure mycluster --set max_connections=2000 +``` + +### Options + +``` + --auto-approve Skip interactive approval before reconfiguring the cluster + --components strings Component names to this operations + --config-file string Specify the name of the configuration file to be updated (e.g. for mysql: --config-file=my.cnf). For available templates and configs, refer to: 'kbcli cluster describe-config'. + --config-spec string Specify the name of the configuration template to be updated (e.g. for apecloud-mysql: --config-spec=mysql-3node-tpl). For available templates and configs, refer to: 'kbcli cluster describe-config'. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + --force-restart Boolean flag to restart component. Default with false. + -h, --help help for configure + --local-file string Specify the local configuration file to be updated. + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. 
Allowed values: JSON and YAML (default yaml) + --replace Boolean flag to enable replacing config file. Default with false. + --set strings Specify parameters list to be updated. For more details, refer to 'kbcli cluster describe-config'. + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_connect.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_connect.mdx new file mode 100644 index 00000000..914cc36f --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_connect.mdx @@ -0,0 +1,65 @@ +--- +title: kbcli cluster connect +--- + +Connect to a cluster or instance. + +``` +kbcli cluster connect (NAME | -i INSTANCE-NAME) [flags] +``` + +### Examples + +``` + # connect to a specified cluster + kbcli cluster connect mycluster + + # connect to a specified instance + kbcli cluster connect -i mycluster-instance-0 + + # connect to a specified component + kbcli cluster connect mycluster --component mycomponent + + # show cli connection example, supported client: [cli, java, python, rust, php, node.js, go, .net, django] and more. + kbcli cluster connect mycluster --client=cli +``` + +### Options + +``` + --client string Which client connection example should be output. + --component string The component to connect. If not specified and no any cluster scope services, pick up the first one. + -h, --help help for connect + -i, --instance string The instance name to connect. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_convert-to-v1.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_convert-to-v1.mdx new file mode 100644 index 00000000..ef4dbaf0 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_convert-to-v1.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli cluster convert-to-v1 +--- + +convert cluster api version. 
+ +``` +kbcli cluster convert-to-v1 [NAME] [flags] +``` + +### Examples + +``` + # convert a v1alpha1 cluster + kbcli cluster convert-to-v1 mycluster + + # convert a v1alpha1 cluster with --dry-run + kbcli cluster convert-to-v1 mycluster --dry-run +``` + +### Options + +``` + --dry-run dry run mode + -h, --help help for convert-to-v1 + --no-diff only print the new cluster yaml +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create.mdx new file mode 100644 index 00000000..6c380562 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create.mdx @@ -0,0 +1,68 @@ +--- +title: kbcli cluster create +--- + +Create a cluster. + +``` +kbcli cluster create [ClusterType] [flags] +``` + +### Examples + +``` + # Create a postgresql + kbcli cluster create postgresql my-cluster + + # Get the cluster yaml by dry-run + kbcli cluster create postgresql my-cluster --dry-run + + # Edit cluster yaml before creation. + kbcli cluster create mycluster --edit +``` + +### Options + +``` + -h, --help help for create +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. +* [kbcli cluster create apecloud-mysql](kbcli_cluster_create_apecloud-mysql.md) - Create a apecloud-mysql cluster. +* [kbcli cluster create etcd](kbcli_cluster_create_etcd.md) - Create a etcd cluster. +* [kbcli cluster create kafka](kbcli_cluster_create_kafka.md) - Create a kafka cluster. +* [kbcli cluster create mongodb](kbcli_cluster_create_mongodb.md) - Create a mongodb cluster. +* [kbcli cluster create mysql](kbcli_cluster_create_mysql.md) - Create a mysql cluster. +* [kbcli cluster create postgresql](kbcli_cluster_create_postgresql.md) - Create a postgresql cluster. +* [kbcli cluster create qdrant](kbcli_cluster_create_qdrant.md) - Create a qdrant cluster. +* [kbcli cluster create rabbitmq](kbcli_cluster_create_rabbitmq.md) - Create a rabbitmq cluster. +* [kbcli cluster create redis](kbcli_cluster_create_redis.md) - Create a redis cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_apecloud-mysql.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_apecloud-mysql.mdx new file mode 100644 index 00000000..e654ed26 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_apecloud-mysql.mdx @@ -0,0 +1,86 @@ +--- +title: kbcli cluster create apecloud-mysql +--- + +Create a apecloud-mysql cluster. + +``` +kbcli cluster create apecloud-mysql NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create apecloud-mysql + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create apecloud-mysql --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --etcd.local.etcdctl-api string (default "3") + --etcd.local.replicas int Value range [1, 3]. (default 3) + --etcd.local.resources.storage string (default "20Gi") + --etcd.local.service-version string (default "3.5.6") + --etcd.mode string Legal values [serviceRef, local]. (default "local") + --etcd.service-ref.cluster.component string (default "etcd") + --etcd.service-ref.cluster.credential string + --etcd.service-ref.cluster.name string + --etcd.service-ref.cluster.port string (default "client") + --etcd.service-ref.cluster.service string (default "headless") + --etcd.service-ref.namespace string (default "default") + --etcd.service-ref.service-descriptor string + -h, --help help for apecloud-mysql + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) + --mode string Cluster topology mode. 
Legal values [standalone, raftGroup]. (default "standalone") + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --proxy-enabled Enable proxy or not. + --replicas int The number of replicas, for standalone mode, the replicas is 1, for raftGroup mode, the default replicas is 3. Value range [1, 5]. (default 1) + --storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) + --storage-class-name string Storage class name of the data volume + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string MySQL Service Version. (default "8.0.30") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_etcd.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_etcd.mdx new file mode 100644 index 00000000..4ea3d7e6 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_etcd.mdx @@ -0,0 +1,74 @@ +--- +title: kbcli cluster create etcd +--- + +Create a etcd cluster. 
+ +``` +kbcli cluster create etcd NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create etcd + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create etcd --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --client-service.node-port int Optional, if clientService type is NodePort, by default and for convenience, the Kubernetes control plane will allocate a port from a range (default: 30000-32767). + --client-service.port int The port on which the service will listen. (default 2379) + --client-service.role string Role of the service within the cluster. (default "leader") + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for etcd + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --replicas int The number of replicas, the default replicas is 3. Value range [1, 5]. (default 3) + --storage float Data Storage size, the unit is Gi. Value range [1, 10000]. (default 10) + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. 
(default "Delete") + --tls-enable Enable TLS for etcd cluster + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_kafka.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_kafka.mdx new file mode 100644 index 00000000..49980c8e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_kafka.mdx @@ -0,0 +1,89 @@ +--- +title: kbcli cluster create kafka +--- + +Create a kafka cluster. + +``` +kbcli cluster create kafka NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create kafka + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create kafka --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --broker-heap string Kafka broker's jvm heap setting. (default "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64") + --broker-replicas int The number of Kafka broker replicas for separated mode. Value range [1, 100]. (default 1) + --controller-heap string Kafka controller's jvm heap setting for separated mode (default "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64") + --controller-replicas int The number of Kafka controller replicas for separated mode. Legal values [1, 3, 5]. (default 1) + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. 
(default "none") + --edit Edit the API resource before creating + --fixed-pod-ip-enabled advertised.listeners Whether to enable fixed Pod IP mode in Kafka's advertised.listeners + -h, --help help for kafka + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) + --meta-storage float Metadata Storage size, the unit is Gi. Value range [1, 10000]. (default 5) + --meta-storage-class string The StorageClass for Kafka Metadata Storage. + --mode string Mode for Kafka kraft cluster, 'combined' is combined Kafka controller and broker,'separated' is broker and controller running independently. Legal values [combined, separated]. (default "combined") + --monitor-enable Enable monitor for Kafka. (default true) + --monitor.limit.cpu float (default 0.5) + --monitor.limit.memory float (default 1) + --monitor.replicas int Number of replicas for the monitor component. Value range [1]. (default 1) + --monitor.request.cpu float (default 0.1) + --monitor.request.memory float (default 0.2) + --node-labels stringToString Node label selector (default []) + --node-port-enabled advertised.listeners Whether to enable NodePort mode in Kafka's advertised.listeners + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --replicas int The number of Kafka broker replicas for combined mode. Legal values [1, 3, 5]. (default 1) + --sasl-enable Enable authentication using SASL/PLAIN for Kafka. + --storage float Data Storage size, the unit is Gi. Value range [1, 10000]. (default 10) + --storage-class string The StorageClass for Kafka Data Storage. + --storage-enable Enable storage for Kafka. + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. 
(default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string Cluster version. (default "3.3.2") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_mongodb.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_mongodb.mdx new file mode 100644 index 00000000..96b9b695 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_mongodb.mdx @@ -0,0 +1,74 @@ +--- +title: kbcli cluster create mongodb +--- + +Create a mongodb cluster. + +``` +kbcli cluster create mongodb NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create mongodb + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create mongodb --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for mongodb + --hostnetwork string Legal values [enabled, disabled]. (default "enabled") + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) + --mode string Cluster topology mode. Legal values [standalone, replicaset]. (default "standalone") + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. 
Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --replicas int The number of replicas, for standalone mode, the replicas is 1, for replicaset mode, the default replicas is 3. Value range [1, 5]. (default 1) + --storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) + --storage-class-name string Storage class name of the data volume + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string Cluster version. Legal values [8.0.8, 8.0.6, 8.0.4, 7.0.19, 7.0.16, 7.0.12, 6.0.22, 6.0.20, 6.0.16, 5.0.30, 5.0.28, 4.4.29, 4.2.24, 4.0.28]. (default "6.0.16") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_mysql.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_mysql.mdx new file mode 100644 index 00000000..9cbe561e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_mysql.mdx @@ -0,0 +1,78 @@ +--- +title: kbcli cluster create mysql +--- + +Create a mysql cluster. 
+ +``` +kbcli cluster create mysql NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create mysql + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create mysql --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.5, 64]. (default 1) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for mysql + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 1) + --node-labels stringToString Node label selector (default []) + --orchestrator.cluster-service-selector.cluster-name string orchestrator cluster name for service selector + --orchestrator.cluster-service-selector.namespace string orchestrator cluster namespace for service selector + --orchestrator.service-reference.endpoint string Endpoint name of the service reference, format: : + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --proxysql.cpu float (default 1) + --proxysql.memory float Memory, the unit is Gi. (default 1) + --proxysql.replicas int (default 1) + --replicas int The number of replicas. Value range [1, 5]. (default 1) + --storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. 
(default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology string Topology type of the serve. Note that under the orc/orc-proxysql topology, it is necessary to specify the Orchestrator cluster information. You should choose either orchestrator.cluster-service-selector or orchestrator.service-reference. This means that depending on your setup, you will configure one of these options to properly integrate with the Orchestrator service for managing your MySQL cluster. Legal values [semisync, semisync-proxysql, mgr, mgr-proxysql, orc, orc-proxysql]. (default "semisync") + --topology-keys stringArray Topology keys for affinity + --version string MySQL version Legal values [8.0.39, 8.0.38, 8.0.37, 8.0.36, 8.4.2, 8.4.1, 8.4.0, 5.7.44]. (default "8.0.39") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_postgresql.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_postgresql.mdx new file mode 100644 index 00000000..8a17025e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_postgresql.mdx @@ -0,0 +1,72 @@ +--- +title: kbcli cluster create postgresql +--- + +Create a postgresql cluster. + +``` +kbcli cluster create postgresql NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create postgresql + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create postgresql --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. 
(default "none") + --edit Edit the API resource before creating + -h, --help help for postgresql + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --replicas int The number of replicas, for standalone mode, the replicas is 1, for replication mode, the default replicas is 2. Value range [1, 5]. (default 1) + --storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) + --storage-class-name string Storage class name of the data volume + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string service version. (default "15.7.0") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_qdrant.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_qdrant.mdx new file mode 100644 index 00000000..be0d2f7b --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_qdrant.mdx @@ -0,0 +1,72 @@ +--- +title: kbcli cluster create qdrant +--- + +Create a qdrant cluster. 
+ +``` +kbcli cluster create qdrant NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create qdrant + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create qdrant --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.5, 64]. (default 1) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for qdrant + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 2) + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --replicas int The number of replicas. Value range [1, 16]. (default 1) + --storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) + --storage-class-name string Storage class name of the data volume + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string The version of Qdrant. (default "1.10.0") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. 
User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_rabbitmq.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_rabbitmq.mdx new file mode 100644 index 00000000..5becf0bb --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_rabbitmq.mdx @@ -0,0 +1,73 @@ +--- +title: kbcli cluster create rabbitmq +--- + +Create a rabbitmq cluster. + +``` +kbcli cluster create rabbitmq NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create rabbitmq + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create rabbitmq --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.1, 64]. (default 0.5) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for rabbitmq + --memory float Memory, the unit is Gi. Value range [0.1, 1000]. (default 0.5) + --mode string Cluster topology mode. Legal values [singlenode, clustermode]. (default "singlenode") + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --replicas int The number of replicas, for standalone mode, the replicas is 1, for replicaset mode, the default replicas is 3. Value range [1, 5]. (default 1) + --storage float Storage size, the unit is Gi. Value range [1, 10000]. 
(default 20) + --storage-class-name string Storage class name of the data volume + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string Cluster version. Legal values [4.0.9, 3.13.7, 3.13.2, 3.12.14, 3.11.28, 3.10.25, 3.9.29, 3.8.14]. (default "3.13.7") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. 
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_redis.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_redis.mdx new file mode 100644 index 00000000..5c6e1817 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_redis.mdx @@ -0,0 +1,92 @@ +--- +title: kbcli cluster create redis +--- + +Create a redis cluster. + +``` +kbcli cluster create redis NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create redis + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create redis --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --custom-secret-name string the secret must contain keys named 'username' and 'password' + --custom-secret-namespace string the secret must contain keys named 'username' and 'password' + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for redis + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. 
(default 0.5) + --mode string Cluster topology mode. Legal values [standalone, replication, cluster, replication-twemproxy]. (default "replication") + --node-labels stringToString Node label selector (default []) + --node-port-enabled Whether NodePort service is enabled, default is true + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --redis-cluster.custom-secret-name string the secret must contain keys named 'username' and 'password' + --redis-cluster.custom-secret-namespace string the secret must contain keys named 'username' and 'password' + --redis-cluster.shard-count float The number of shards in the redis cluster Value range [3, 2048]. (default 3) + --replicas int The number of replicas, for standalone mode, the replicas is 1, for replication mode, the default replicas is 2. Value range [1, 5]. (default 1) + --sentinel.cpu float Sentinel component cpu cores. Value range [0.1, 8]. (default 0.2) + --sentinel.custom-master-name string Name of the master node monitored by Sentinel. If empty, a default value will be used. + --sentinel.custom-secret-name string the secret must contain keys named 'username' and 'password' + --sentinel.custom-secret-namespace string the secret must contain keys named 'username' and 'password' + --sentinel.enabled Whether have sentinel component, default is true (default true) + --sentinel.memory float Sentinel component memory, the unit is Gi. Value range [0.1, 4]. (default 0.2) + --sentinel.replicas float Sentinel component replicas Value range [1, 5]. (default 3) + --sentinel.storage float Sentinel component storage size, the unit is Gi. Value range [1, 1024]. (default 20) + --sentinel.storage-class-name string Sentinel component storage class name + --storage float Storage size, the unit is Gi. Value range [1, 10000]. 
(default 20) + --storage-class-name string Storage class name of the data volume + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --twemproxy.cpu float twemproxy component cpu cores. Value range [0.1, 8]. (default 0.2) + --twemproxy.enabled Whether have twemproxy component, default is false + --twemproxy.memory float twemproxy component memory, the unit is Gi. Value range [0.1, 4]. (default 0.2) + --twemproxy.replicas float twemproxy component replicas Value range [1, 5]. (default 3) + --version string Cluster version. (default "7.2.7") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops.mdx new file mode 100644 index 00000000..8bd6e19e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops.mdx @@ -0,0 +1,62 @@ +--- +title: kbcli cluster custom-ops +--- + + + +``` +kbcli cluster custom-ops OpsDef --cluster [flags] +``` + +### Examples + +``` + # custom ops cli format + kbcli cluster custom-ops --cluster + + # example for kafka topic + kbcli cluster custom-ops kafka-topic --cluster mycluster --type create --topic test --partition 3 --replicas 3 + + # example for kafka acl + kbcli cluster custom-ops kafka-user-acl --cluster mycluster --type add --operations "Read,Writer,Delete,Alter,Describe" --allowUsers client --topic "*" + + # example for kafka quota + kbcli cluster custom-ops kafka-quota --cluster mycluster --user client --producerByteRate 1024 --consumerByteRate 2048 +``` + +### Options + +``` + -h, --help help for custom-ops 
+``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_delete-backup.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_delete-backup.mdx new file mode 100644 index 00000000..94099819 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_delete-backup.mdx @@ -0,0 +1,60 @@ +--- +title: kbcli cluster delete-backup +--- + +Delete a backup. + +``` +kbcli cluster delete-backup [flags] +``` + +### Examples + +``` + # delete a backup named backup-name + kbcli cluster delete-backup cluster-name --name backup-name +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + --auto-approve Skip interactive approval before deleting + --force If true, immediately remove resources from API and bypass graceful deletion. Note that immediate deletion of some resources may result in inconsistency or data loss and requires confirmation. + --grace-period int Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. Set to 1 for immediate shutdown. Can only be set to 0 when --force is true (force deletion). (default -1) + -h, --help help for delete-backup + --name strings Backup names + --now If true, resources are signaled for immediate shutdown (same as --grace-period=1). + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_delete-ops.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_delete-ops.mdx new file mode 100644 index 00000000..aefc6560 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_delete-ops.mdx @@ -0,0 +1,63 @@ +--- +title: kbcli cluster delete-ops +--- + +Delete an OpsRequest. 
+
+```
+kbcli cluster delete-ops [flags]
+```
+
+### Examples
+
+```
+  # delete all ops belonging to the specified cluster
+  kbcli cluster delete-ops mycluster
+
+  # delete the specified ops belonging to the specified cluster
+  kbcli cluster delete-ops --name=mysql-restart-82zxv
+```
+
+### Options
+
+```
+  -A, --all-namespaces     If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
+      --auto-approve       Skip interactive approval before deleting
+      --force              If true, immediately remove resources from API and bypass graceful deletion. Note that immediate deletion of some resources may result in inconsistency or data loss and requires confirmation.
+      --grace-period int   Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. Set to 1 for immediate shutdown. Can only be set to 0 when --force is true (force deletion). (default -1)
+  -h, --help               help for delete-ops
+      --name strings       OpsRequest names
+      --now                If true, resources are signaled for immediate shutdown (same as --grace-period=1).
+  -l, --selector string    Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_delete.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_delete.mdx new file mode 100644 index 00000000..435a7a89 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_delete.mdx @@ -0,0 +1,65 @@ +--- +title: kbcli cluster delete +--- + +Delete clusters. 
+
+```
+kbcli cluster delete NAME [flags]
+```
+
+### Examples

+
+```
+  # delete a cluster named mycluster
+  kbcli cluster delete mycluster
+
+  # delete a cluster by label selector
+  kbcli cluster delete --selector clusterdefinition.kubeblocks.io/name=apecloud-mysql
+
+  # delete a cluster named mycluster forcibly
+  kbcli cluster delete mycluster --force
+```
+
+### Options
+
+```
+  -A, --all-namespaces     If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
+      --auto-approve       Skip interactive approval before deleting
+      --force              If true, immediately remove resources from API and bypass graceful deletion. Note that immediate deletion of some resources may result in inconsistency or data loss and requires confirmation.
+      --grace-period int   Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. Set to 1 for immediate shutdown. Can only be set to 0 when --force is true (force deletion). (default -1)
+  -h, --help               help for delete
+      --now                If true, resources are signaled for immediate shutdown (same as --grace-period=1).
+  -l, --selector string    Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_describe-backup-policy.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_describe-backup-policy.mdx new file mode 100644 index 00000000..cd242872 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_describe-backup-policy.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli cluster describe-backup-policy +--- + +Describe backup policy + +``` +kbcli cluster describe-backup-policy [flags] +``` + +### Examples + +``` + # describe the default backup policy of the cluster + kbcli cluster describe-backup-policy cluster-name + + # describe the backup policy of the cluster with specified name + kbcli cluster describe-backup-policy cluster-name --name backup-policy-name +``` + +### Options + +``` + -h, --help help for describe-backup-policy + --names strings Backup policy names +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_describe-backup.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_describe-backup.mdx new file mode 100644 index 00000000..898aebd5 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_describe-backup.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli cluster describe-backup +--- + +Describe a backup. + +``` +kbcli cluster describe-backup BACKUP-NAME [flags] +``` + +### Examples + +``` + # describe backups of the cluster + kbcli cluster describe-backup + + # describe a backup + kbcli cluster describe-backup --names +``` + +### Options + +``` + -h, --help help for describe-backup + --names strings Backup names +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_describe-config.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_describe-config.mdx new file mode 100644 index 00000000..8c972748 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_describe-config.mdx @@ -0,0 +1,65 @@ +--- +title: kbcli cluster describe-config +--- + +Show details of a specific reconfiguring. 
+
+```
+kbcli cluster describe-config [flags]
+```
+
+### Examples
+
+```
+  # describe a cluster, e.g. cluster name is mycluster
+  kbcli cluster describe-config mycluster
+
+  # describe a component, e.g. cluster name is mycluster, component name is mysql
+  kbcli cluster describe-config mycluster --component=mysql
+
+  # describe all configuration files.
+  kbcli cluster describe-config mycluster --component=mysql --show-detail
+
+  # describe the content of a configuration file.
+  kbcli cluster describe-config mycluster --component=mysql --config-file=my.cnf --show-detail
+```
+
+### Options
+
+```
+      --components strings     Specify the name of Component to describe (e.g. for apecloud-mysql: --component=mysql). If the cluster has only one component, unset the parameter.
+      --config-file strings    Specify the name of the configuration file to be described (e.g. for mysql: --config-file=my.cnf). If unset, all files.
+      --config-specs strings   Specify the name of the configuration template to describe. (e.g. for apecloud-mysql: --config-specs=mysql-3node-tpl)
+  -h, --help                   help for describe-config
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_describe-ops.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_describe-ops.mdx new file mode 100644 index 00000000..295a08f0 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_describe-ops.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli cluster describe-ops +--- + +Show details of a specific OpsRequest. 
+ +``` +kbcli cluster describe-ops [flags] +``` + +### Examples + +``` + # describe a specified OpsRequest + kbcli cluster describe-ops mysql-restart-82zxv +``` + +### Options + +``` + -h, --help help for describe-ops +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_describe-restore.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_describe-restore.mdx new file mode 100644 index 00000000..047a8577 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_describe-restore.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli cluster describe-restore +--- + +Describe a restore + +``` +kbcli cluster describe-restore NAME [flags] +``` + +### Examples + +``` + # describe a restore + kbcli cluster describe-restore +``` + +### Options + +``` + -h, --help help for describe-restore +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_describe.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_describe.mdx new file mode 100644 index 00000000..d3879cb7 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_describe.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli cluster describe +--- + +Show details of a specific cluster. + +``` +kbcli cluster describe NAME [flags] +``` + +### Examples + +``` + # describe a specified cluster + kbcli cluster describe mycluster +``` + +### Options + +``` + -h, --help help for describe +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_diff-config.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_diff-config.mdx new file mode 100644 index 00000000..efd0217f --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_diff-config.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli cluster diff-config +--- + +Show the difference in parameters between the two submitted OpsRequest. 
+ +``` +kbcli cluster diff-config [flags] +``` + +### Examples + +``` + # compare config files + kbcli cluster diff-config opsrequest1 opsrequest2 +``` + +### Options + +``` + -h, --help help for diff-config +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_edit-backup-policy.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_edit-backup-policy.mdx new file mode 100644 index 00000000..eb3ca54e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_edit-backup-policy.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli cluster edit-backup-policy +--- + +Edit backup policy + +``` +kbcli cluster edit-backup-policy +``` + +### Examples + +``` + # edit backup policy + kbcli cluster edit-backup-policy +``` + +### Options + +``` + -h, --help help for edit-backup-policy +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_edit-config.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_edit-config.mdx new file mode 100644 index 00000000..1c60a7f7 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_edit-config.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli cluster edit-config +--- + +Edit the config file of the component. + +``` +kbcli cluster edit-config NAME [--components=component-name] [--config-spec=config-spec-name] [--config-file=config-file] [flags] +``` + +### Examples + +``` + # update mysql max_connections, cluster name is mycluster + kbcli cluster edit-config mycluster +``` + +### Options + +``` + --components strings Component names to this operations + --config-file string Specify the name of the configuration file to be updated (e.g. for mysql: --config-file=my.cnf). For available templates and configs, refer to: 'kbcli cluster describe-config'. + --config-spec string Specify the name of the configuration template to be updated (e.g. for apecloud-mysql: --config-spec=mysql-3node-tpl). For available templates and configs, refer to: 'kbcli cluster describe-config'. 
+ --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --enable-delete Boolean flag to enable delete configuration. Default with false. + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + --force-restart Boolean flag to restart component. Default with false. + -h, --help help for edit-config + --local-file string Specify the local configuration file to be updated. + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --replace Boolean flag to enable replacing config file. Default with false. + --set strings Specify parameters list to be updated. For more details, refer to 'kbcli cluster describe-config'. + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_explain-config.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_explain-config.mdx new file mode 100644 index 00000000..eb8ae49e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_explain-config.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli cluster explain-config +--- + +List the constraint for supported configuration params. 
+
+```
+kbcli cluster explain-config [flags]
+```
+
+### Examples
+
+```
+  # explain a cluster, e.g. cluster name is mycluster
+  kbcli cluster explain-config mycluster
+
+  # explain a specified configure template, e.g. cluster name is mycluster
+  kbcli cluster explain-config mycluster --component=mysql --config-specs=mysql-3node-tpl
+
+  # explain a specified configure template, e.g. cluster name is mycluster
+  kbcli cluster explain-config mycluster --component=mysql --config-specs=mysql-3node-tpl --trunc-document=false --trunc-enum=false
+
+  # explain a specified parameter, e.g. cluster name is mycluster
+  kbcli cluster explain-config mycluster --param=sql_mode
+```
+
+### Options
+
+```
+      --components strings     Specify the name of Component to describe (e.g. for apecloud-mysql: --component=mysql). If the cluster has only one component, unset the parameter.
+      --config-specs strings   Specify the name of the configuration template to describe. (e.g. for apecloud-mysql: --config-specs=mysql-3node-tpl)
+  -h, --help                   help for explain-config
+      --param string           Specify the name of the parameter to query. It clearly displays the details of the parameter.
+      --trunc-document         If the document length of the parameter is greater than 100, it will be truncated.
+      --trunc-enum             If the value list length of the parameter is greater than 20, it will be truncated. (default true)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_expose.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_expose.mdx new file mode 100644 index 00000000..ac5cc1e6 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_expose.mdx @@ -0,0 +1,71 @@ +--- +title: kbcli cluster expose +--- + +Expose a cluster with a new endpoint, the new endpoint can be found by executing 'kbcli cluster describe NAME'. 
+ +``` +kbcli cluster expose NAME --enable=[true|false] --type=[intranet|internet] [flags] +``` + +### Examples + +``` + # Expose a cluster to intranet + kbcli cluster expose mycluster --type intranet --enable=true + + # Expose a cluster to public internet + kbcli cluster expose mycluster --type internet --enable=true + + # Stop exposing a cluster + kbcli cluster expose mycluster --type intranet --enable=false +``` + +### Options + +``` + --auto-approve Skip interactive approval before exposing the cluster + --components strings Component names to this operations + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --enable string Enable or disable the expose, values can be true or false + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for expose + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --role-selector roleSelector The Component's exposed Services may target replicas based on their roles using roleSelector, this flag must be set when the component specified has roles + --sub-type string Expose sub type, currently supported types are 'NodePort', 'LoadBalancer', only available if type is intranet (default "LoadBalancer") + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed + --type string Expose type, currently supported types are 'intranet', 'internet' +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_label.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_label.mdx new file mode 100644 index 00000000..928139be --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_label.mdx @@ -0,0 +1,73 @@ +--- +title: kbcli cluster label +--- + +Update the labels on cluster + +``` +kbcli cluster label NAME [flags] +``` + +### Examples + +``` + # list label for clusters with specified name + kbcli cluster label mycluster --list + + # add label 'env' and value 'dev' for clusters with specified name + kbcli cluster label mycluster env=dev + + # add label 'env' and value 'dev' for all clusters + kbcli cluster label env=dev --all + + # add label 'env' and value 'dev' for the clusters that match the selector + kbcli cluster label env=dev -l type=mysql + + # update cluster with the label 'env' with value 'test', overwriting any existing value + kbcli cluster label mycluster --overwrite env=test + + # delete label env for clusters with specified name + kbcli cluster label mycluster env- +``` + +### Options + +``` + --all Select all cluster + --dry-run string[="unchanged"] Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit server-side request without persisting the resource. (default "none") + -h, --help help for label + --list If true, display the labels of the clusters + --overwrite If true, allow labels to be overwritten, otherwise reject label updates that overwrite existing labels. + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+
diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_list-backup-policies.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_list-backup-policies.mdx
new file mode 100644
index 00000000..8708d98c
--- /dev/null
+++ b/docs/en/release-1_0_1/cli/kbcli_cluster_list-backup-policies.mdx
@@ -0,0 +1,61 @@
+---
+title: kbcli cluster list-backup-policies
+---
+
+List backup policies.
+
+```
+kbcli cluster list-backup-policies [flags]
+```
+
+### Examples
+
+```
+  # list all backup policies
+  kbcli cluster list-backup-policies
+
+  # using short cmd to list backup policy of the specified cluster
+  kbcli cluster list-bp mycluster
+```
+
+### Options
+
+```
+  -A, --all-namespaces     If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
+  -h, --help               help for list-backup-policies
+      --names strings      The backup policy name to get the details.
+  -n, --namespace string   specified the namespace
+  -o, --output format      prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+  -l, --selector string    Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+      --show-labels        When printing, show all labels as the last column (default hide labels column)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_list-backups.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_list-backups.mdx new file mode 100644 index 00000000..6931bb56 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_list-backups.mdx @@ -0,0 +1,64 @@ +--- +title: kbcli cluster list-backups +--- + +List backups. 
+ +``` +kbcli cluster list-backups [flags] +``` + +### Examples + +``` + # list all backups + kbcli cluster list-backups + + # list all backups of the cluster + kbcli cluster list-backups + + # list the specified backups + kbcli cluster list-backups --names b1,b2 +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list-backups + --names strings The backup name to get the details. + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_list-components.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_list-components.mdx new file mode 100644 index 00000000..0c321d5a --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_list-components.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli cluster list-components +--- + +List cluster components. + +``` +kbcli cluster list-components [flags] +``` + +### Examples + +``` + # list all components of all clusters in current namespace + kbcli cluster list-components + + # list all components of a specified cluster + kbcli cluster list-components mycluster +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list-components + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. 
+``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_list-events.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_list-events.mdx new file mode 100644 index 00000000..0f8e3a93 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_list-events.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli cluster list-events +--- + +List cluster events. + +``` +kbcli cluster list-events [flags] +``` + +### Examples + +``` + # list all events of all clusters in current namespace + kbcli cluster list-events + + # list all events of a specified cluster + kbcli cluster list-events mycluster +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list-events + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_list-instances.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_list-instances.mdx new file mode 100644 index 00000000..f54c2081 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_list-instances.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli cluster list-instances +--- + +List cluster instances. + +``` +kbcli cluster list-instances [flags] +``` + +### Examples + +``` + # list all instances of all clusters in current namespace + kbcli cluster list-instances + + # list all instances of a specified cluster + kbcli cluster list-instances mycluster +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list-instances + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). 
Matching objects must satisfy all of the specified label constraints. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. 
+ +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_list-logs.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_list-logs.mdx new file mode 100644 index 00000000..83d62d82 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_list-logs.mdx @@ -0,0 +1,61 @@ +--- +title: kbcli cluster list-logs +--- + +List supported log files in cluster. + +``` +kbcli cluster list-logs NAME [flags] +``` + +### Examples + +``` + # Display supported log files in cluster mycluster with all instance + kbcli cluster list-logs mycluster + + # Display supported log files in cluster mycluster with specify component my-component + kbcli cluster list-logs mycluster --component my-component + + # Display supported log files in cluster mycluster with specify instance my-instance-0 + kbcli cluster list-logs mycluster --instance my-instance-0 +``` + +### Options + +``` + --component string Component name. + -h, --help help for list-logs + -i, --instance string Instance name. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_list-ops.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_list-ops.mdx new file mode 100644 index 00000000..c8498d16 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_list-ops.mdx @@ -0,0 +1,63 @@ +--- +title: kbcli cluster list-ops +--- + +List all opsRequests. + +``` +kbcli cluster list-ops [flags] +``` + +### Examples + +``` + # list all opsRequests + kbcli cluster list-ops + + # list all opsRequests of specified cluster + kbcli cluster list-ops mycluster +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list-ops + --name string The OpsRequest name to get the details. + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. 
Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) + --status strings Options include all, pending, creating, running, canceling, failed. by default, outputs the pending/creating/running/canceling/failed OpsRequest. (default [pending,creating,running,canceling,failed]) + --type strings The OpsRequest type +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_list-restores.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_list-restores.mdx new file mode 100644 index 00000000..5d93e28b --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_list-restores.mdx @@ -0,0 +1,64 @@ +--- +title: kbcli cluster list-restores +--- + +List restores. + +``` +kbcli cluster list-restores [flags] +``` + +### Examples + +``` + # list all restores + kbcli cluster list-restores + + # list all restores of the cluster + kbcli cluster list-restores + + # list the specified restores + kbcli cluster list-restores --names r1,r2 +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list-restores + --names strings List restores in the specified cluster + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. 
User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_list.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_list.mdx new file mode 100644 index 00000000..53f15f0e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_list.mdx @@ -0,0 +1,70 @@ +--- +title: kbcli cluster list +--- + +List clusters. + +``` +kbcli cluster list [NAME] [flags] +``` + +### Examples + +``` + # list all clusters + kbcli cluster list + + # list a single cluster with specified name + kbcli cluster list mycluster + + # list a single cluster in YAML output format + kbcli cluster list mycluster -o yaml + + # list a single cluster in JSON output format + kbcli cluster list mycluster -o json + + # list a single cluster in wide output format + kbcli cluster list mycluster -o wide +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) + --status string Filter objects by given status. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_logs.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_logs.mdx new file mode 100644 index 00000000..a445b8b6 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_logs.mdx @@ -0,0 +1,92 @@ +--- +title: kbcli cluster logs +--- + +Access cluster log file. 
+ +``` +kbcli cluster logs NAME [flags] +``` + +### Examples + +``` + # Return snapshot logs from cluster mycluster with default primary instance (stdout) + kbcli cluster logs mycluster + + # Display only the most recent 20 lines from cluster mycluster with default primary instance (stdout) + kbcli cluster logs mycluster --tail=20 + + # Display stdout info of specific instance my-instance-0 (cluster name comes from annotation app.kubernetes.io/instance) + kbcli cluster logs --instance my-instance-0 + + # Return snapshot logs from cluster mycluster with specific instance my-instance-0 (stdout) + kbcli cluster logs mycluster --instance my-instance-0 + + # Return snapshot logs from cluster mycluster with specific instance my-instance-0 and specific container + # my-container (stdout) + kbcli cluster logs mycluster --instance my-instance-0 -c my-container + + # Return slow logs from cluster mycluster with default primary instance + kbcli cluster logs mycluster --file-type=slow + + # Begin streaming the slow logs from cluster mycluster with default primary instance + kbcli cluster logs -f mycluster --file-type=slow + + # Return the specific file logs from cluster mycluster with specific instance my-instance-0 + kbcli cluster logs mycluster --instance my-instance-0 --file-path=/var/log/yum.log + + # Return the specific file logs from cluster mycluster with specific instance my-instance-0 and specific + # container my-container + kbcli cluster logs mycluster --instance my-instance-0 -c my-container --file-path=/var/log/yum.log +``` + +### Options + +``` + -c, --container string Container name. + --file-path string Log-file path. File path has a priority over file-type. When file-path and file-type are unset, output stdout/stderr of target container. + --file-type string Log-file type. List them with list-logs cmd. When file-path and file-type are unset, output stdout/stderr of target container. + -f, --follow Specify if the logs should be streamed. 
+ -h, --help help for logs + --ignore-errors If watching / following pod logs, allow for any errors that occur to be non-fatal. Only take effect for stdout&stderr. + -i, --instance string Instance name. + --limit-bytes int Maximum bytes of logs to return. + --prefix Prefix each log line with the log source (pod name and container name). Only take effect for stdout&stderr. + -p, --previous If true, print the logs for the previous instance of the container in a pod if it exists. Only take effect for stdout&stderr. + --since duration Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of since-time / since may be used. Only take effect for stdout&stderr. + --since-time string Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of since-time / since may be used. Only take effect for stdout&stderr. + --tail int Lines of recent log file to display. Defaults to -1 for showing all log lines. (default -1) + --timestamps Include timestamps on each line in the log output. Only take effect for stdout&stderr. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_promote.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_promote.mdx new file mode 100644 index 00000000..ad6f60d2 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_promote.mdx @@ -0,0 +1,62 @@ +--- +title: kbcli cluster promote +--- + +Promote a non-primary or non-leader instance as the new primary or leader of the cluster + +``` +kbcli cluster promote NAME [--instance ] [flags] +``` + +### Examples + +``` + # Promote the instance mycluster-mysql-1 as the new primary or leader. + kbcli cluster promote mycluster --candidate mycluster-mysql-1 +``` + +### Options + +``` + --auto-approve Skip interactive approval before promote the instance + --candidate string Specify the instance name as the new primary or leader of the cluster, you can get the instance name by running "kbcli cluster list-instances" + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for promote + --instance string Specify the instance name that will transfer its role to the candidate pod, If not set, the current primary or leader of the cluster will be used. + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+
diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_rebuild-instance.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_rebuild-instance.mdx
new file mode 100644
index 00000000..fd1a83b6
--- /dev/null
+++ b/docs/en/release-1_0_1/cli/kbcli_cluster_rebuild-instance.mdx
@@ -0,0 +1,75 @@
+---
+title: kbcli cluster rebuild-instance
+---
+
+Rebuild the specified instances in the cluster.
+
+```
+kbcli cluster rebuild-instance NAME [flags]
+```
+
+### Examples
+
+```
+  # rebuild instance by creating new instances and remove the specified instances after the new instances are ready.
+  kbcli cluster rebuild-instance mycluster --instances pod1,pod2
+
+  # rebuild instance to a new node.
+  kbcli cluster rebuild-instance mycluster --instances pod1 --node nodeName.
+
+  # rebuild instance with the same pod name.
+  kbcli cluster rebuild-instance mycluster --instances pod1 --in-place
+
+  # rebuild instance from backup and with the same pod name
+  kbcli cluster rebuild-instance mycluster --instances pod1,pod2 --backup backupName --in-place
+```
+
+### Options
+
+```
+  --auto-approve Skip interactive approval before rebuilding the instances.
+  --backup string instances will be rebuilt by the specified backup.
+  --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+  --edit Edit the API resource before creating
+  --force skip the pre-checks of the opsRequest to run the opsRequest forcibly
+  -h, --help help for rebuild-instance
+  --in-place rebuild the instance with the same pod name. if not set, will create a new instance by horizontalScaling and remove the instance after the new instance is ready
+  --instances strings instances which need to rebuild.
+  --name string OpsRequest name.
if not specified, it will be randomly generated + --node strings specified the target node which rebuilds the instance on the node otherwise will rebuild on a random node. format: insName1=nodeName,insName2=nodeName + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --restore-env stringArray provide the necessary env for the 'Restore' operation from the backup. format: key1=value, key2=value + --source-backup-target string To rebuild a sharding component instance from a backup, you can specify the name of the source backup target + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. 
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_register.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_register.mdx new file mode 100644 index 00000000..4f04216d --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_register.mdx @@ -0,0 +1,64 @@ +--- +title: kbcli cluster register +--- + +Pull the cluster chart to the local cache and register the type to 'create' sub-command + +``` +kbcli cluster register [NAME] [flags] +``` + +### Examples + +``` + # Pull a cluster type to local and register it to "kbcli cluster create" sub-cmd from specified URL + kbcli cluster register orioledb --source https://github.com/apecloud/helm-charts/releases/download/orioledb-cluster-0.6.0-beta.44/orioledb-cluster-0.6.0-beta.44.tgz + + # Register a cluster type from a local path file + kbcli cluster register neon --source pkg/cli/cluster/charts/neon-cluster.tgz + + # Register a cluster type from a Helm repository, specifying the version and engine. 
+ kbcli cluster register mysql --engine mysql --version 0.9.0 --repo https://jihulab.com/api/v4/projects/150246/packages/helm/stable +``` + +### Options + +``` + --alias string Set the cluster type alias + --engine string Specify the cluster chart name in helm repo + -h, --help help for register + --repo string Specify the url of helm repo which contains cluster charts (default "https://jihulab.com/api/v4/projects/150246/packages/helm/stable") + -S, --source string Specify the cluster type chart source, support a URL or a local file path + --version string Specify the version of cluster chart to register +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. 
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_restart.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_restart.mdx new file mode 100644 index 00000000..1bbbd728 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_restart.mdx @@ -0,0 +1,64 @@ +--- +title: kbcli cluster restart +--- + +Restart the specified components in the cluster. + +``` +kbcli cluster restart NAME [flags] +``` + +### Examples + +``` + # restart all components + kbcli cluster restart mycluster + + # specified component to restart, separate with commas for multiple components + kbcli cluster restart mycluster --components=mysql +``` + +### Options + +``` + --auto-approve Skip interactive approval before restarting the cluster + --components strings Component names to this operations + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for restart + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. 
Allowed values: JSON and YAML (default yaml) + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_restore.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_restore.mdx new file mode 100644 index 00000000..1d701860 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_restore.mdx @@ -0,0 +1,63 @@ +--- +title: kbcli cluster restore +--- + +Restore a new cluster from backup. + +``` +kbcli cluster restore [flags] +``` + +### Examples + +``` + # restore a new cluster from a backup + kbcli cluster restore new-cluster-name --backup backup-name +``` + +### Options + +``` + --backup string Backup name + --backup-namespace string Backup namespace + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for restore + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --restore-after-cluster-running do the postReady phase when the cluster is Running rather than the component is Running. 
+ --restore-key string specify the key to restore in kv database, support multiple keys split by comma with wildcard pattern matching + --restore-key-ignore-errors whether or not to ignore errors when restore kv database by keys + --restore-to-time string point in time recovery(PITR) + --volume-restore-policy string the volume claim restore policy, supported values: [Serial, Parallel] (default "Parallel") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_scale-in.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_scale-in.mdx new file mode 100644 index 00000000..b6d37547 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_scale-in.mdx @@ -0,0 +1,69 @@ +--- +title: kbcli cluster scale-in +--- + +scale in replicas of the specified components in the cluster. + +``` +kbcli cluster scale-in Replicas [flags] +``` + +### Examples + +``` + # scale in 2 replicas + kbcli cluster scale-in mycluster --components=mysql --replicas=2 + + # offline specified instances + kbcli cluster scale-in mycluster --components=mysql --offline-instances pod1 + + # scale in 2 replicas, one of them is specified by "--offline-instances". + kbcli cluster scale-in mycluster --components=mysql --replicas=2 --offline-instances pod1 +``` + +### Options + +``` + --auto-approve Skip interactive approval before horizontally scaling the cluster + --components strings Component names to this operations + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for scale-in + --name string OpsRequest name. 
if not specified, it will be randomly generated + --offline-instances strings offline the specified instances + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --replicas string Replicas with the specified components + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_scale-out.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_scale-out.mdx new file mode 100644 index 00000000..e737c8d6 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_scale-out.mdx @@ -0,0 +1,69 @@ +--- +title: kbcli cluster scale-out +--- + +scale out replicas of the specified components in the cluster. + +``` +kbcli cluster scale-out Replicas [flags] +``` + +### Examples + +``` + # scale out 2 replicas + kbcli cluster scale-out mycluster --components=mysql --replicas=2 + + # to bring the offline instances specified in compSpec.offlineInstances online. + kbcli cluster scale-out mycluster --components=mysql --online-instances pod1 + + # scale out 2 replicas, one of which is an instance that has already been taken offline. + kbcli cluster scale-out mycluster --components=mysql --replicas=2 --online-instances pod1 +``` + +### Options + +``` + --auto-approve Skip interactive approval before horizontally scaling the cluster + --components strings Component names to this operations + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for scale-out + --name string OpsRequest name. 
if not specified, it will be randomly generated + --online-instances strings online the specified instances which have been offline + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --replicas string Replica changes with the specified components + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_start.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_start.mdx new file mode 100644 index 00000000..dbc15d91 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_start.mdx @@ -0,0 +1,63 @@ +--- +title: kbcli cluster start +--- + +Start the cluster if cluster is stopped. + +``` +kbcli cluster start NAME [flags] +``` + +### Examples + +``` + # start the cluster when cluster is stopped + kbcli cluster start mycluster + + # start the component of the cluster when cluster is stopped + kbcli cluster start mycluster --components=mysql +``` + +### Options + +``` + --components strings Component names to this operations + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for start + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
+ --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_stop.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_stop.mdx new file mode 100644 index 00000000..2acbb525 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_stop.mdx @@ -0,0 +1,64 @@ +--- +title: kbcli cluster stop +--- + +Stop the cluster and release all the pods of the cluster. 
+ +``` +kbcli cluster stop NAME [flags] +``` + +### Examples + +``` + # stop the cluster and release all the pods of the cluster + kbcli cluster stop mycluster + + # stop the component of the cluster and release all the pods of the component + kbcli cluster stop mycluster --components=mysql +``` + +### Options + +``` + --auto-approve Skip interactive approval before stopping the cluster + --components strings Component names to this operations + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for stop + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_update.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_update.mdx new file mode 100644 index 00000000..62077f10 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_update.mdx @@ -0,0 +1,103 @@ +--- +title: kbcli cluster update +--- + +Update the cluster settings, such as enable or disable monitor or log. 
+ +``` +kbcli cluster update NAME [flags] +``` + +### Examples + +``` + # update cluster mycluster termination policy to Delete + kbcli cluster update mycluster --termination-policy=Delete + + # enable cluster monitor + kbcli cluster update mycluster --monitor=true + + # update cluster tolerations + kbcli cluster update mycluster --tolerations='"key=engineType,value=mongo,operator=Equal,effect=NoSchedule","key=diskType,value=ssd,operator=Equal,effect=NoSchedule"' + + # edit cluster + kbcli cluster update mycluster --edit + + # enable cluster monitor and edit + # kbcli cluster update mycluster --monitor=true --edit + + # enable cluster auto backup + kbcli cluster update mycluster --backup-enabled=true + + # update cluster backup retention period + kbcli cluster update mycluster --backup-retention-period=1d + + # update cluster backup method + kbcli cluster update mycluster --backup-method=snapshot + + # update cluster backup cron expression + kbcli cluster update mycluster --backup-cron-expression="0 0 * * *" + + # update cluster backup starting deadline minutes + kbcli cluster update mycluster --backup-starting-deadline-minutes=10 + + # update cluster backup repo name + kbcli cluster update mycluster --backup-repo-name=repo1 + + # update cluster backup pitr enabled + kbcli cluster update mycluster --pitr-enabled=true +``` + +### Options + +``` + --allow-missing-template-keys If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. (default true) + --backup-cron-expression string the cron expression for schedule, the timezone is in UTC. see https://en.wikipedia.org/wiki/Cron. 
+ --backup-enabled Specify whether enabled automated backup + --backup-method string the backup method, view it by "kbcli cd describe ", if not specified, the default backup method will be to take snapshots of the volume + --backup-repo-name string the backup repository name + --backup-retention-period string a time string ending with the 'd'|'D'|'h'|'H' character to describe how long the Backup should be retained (default "1d") + --backup-starting-deadline-minutes int the deadline in minutes for starting the backup job if it misses its scheduled time for any reason + --disable-exporter Enable or disable monitoring (default true) + --dry-run string[="unchanged"] Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit server-side request without persisting the resource. (default "none") + --edit Edit the API resource + -h, --help help for update + -o, --output string Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file). + --pitr-enabled Specify whether enabled point in time recovery + --runtime-class-name string Specifies runtimeClassName for all Pods managed by this Cluster. + --show-managed-fields If true, keep the managedFields when printing objects in JSON or YAML format. + --template string Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. + --termination-policy string Termination policy, one of: (DoNotTerminate, Delete, WipeOut) (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect, key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. 
User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_upgrade-to-v1.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_upgrade-to-v1.mdx new file mode 100644 index 00000000..1d91106f --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_upgrade-to-v1.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli cluster upgrade-to-v1 +--- + +upgrade cluster to v1 api version. + +``` +kbcli cluster upgrade-to-v1 [NAME] [flags] +``` + +### Examples + +``` + # upgrade a v1alpha1 cluster to v1 cluster + kbcli cluster upgrade-to-v1 mycluster + + # upgrade a v1alpha1 cluster with --dry-run + kbcli cluster upgrade-to-v1 mycluster --dry-run +``` + +### Options + +``` + --dry-run dry run mode + -h, --help help for upgrade-to-v1 + --no-diff only print the new cluster yaml +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_upgrade.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_upgrade.mdx new file mode 100644 index 00000000..72860af6 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_upgrade.mdx @@ -0,0 +1,69 @@ +--- +title: kbcli cluster upgrade +--- + +Upgrade the service version(only support to upgrade minor version). 
+ +``` +kbcli cluster upgrade NAME [flags] +``` + +### Examples + +``` + # upgrade the component to the target version + kbcli cluster upgrade mycluster --service-version=8.0.30 --components my-comp + + # upgrade the component with new component definition + kbcli cluster upgrade mycluster --component-def=8.0.30 --components my-comp + + # upgrade the component with new component definition and specified service version + kbcli cluster upgrade mycluster --component-def=8.0.30 --service-version=8.0.30 --components my-comp +``` + +### Options + +``` + --auto-approve Skip interactive approval before upgrading the cluster + --component-def string Referring to the ComponentDefinition (default "nil") + --components strings Component names to this operations + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for upgrade + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --service-version string Referring to the serviceVersion that is provided by ComponentDefinition and ComponentVersion (default "nil") + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_volume-expand.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_volume-expand.mdx new file mode 100644 index 00000000..43d86e0c --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_volume-expand.mdx @@ -0,0 +1,63 @@ +--- +title: kbcli cluster volume-expand +--- + +Expand volume with the specified components and volumeClaimTemplates in the cluster. 
+ +``` +kbcli cluster volume-expand NAME [flags] +``` + +### Examples + +``` + # restart specifies the component, separate with commas for multiple components + kbcli cluster volume-expand mycluster --components=mysql --volume-claim-templates=data --storage=10Gi +``` + +### Options + +``` + --auto-approve Skip interactive approval before expanding the cluster volume + --components strings Component names to this operations + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for volume-expand + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --storage string Volume storage size (required) + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed + -t, --volume-claim-templates strings VolumeClaimTemplate names in components (required) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_vscale.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_vscale.mdx new file mode 100644 index 00000000..758d721a --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_vscale.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli cluster vscale +--- + +Vertically scale the specified components in the cluster. 
+ +``` +kbcli cluster vscale NAME [flags] +``` + +### Examples + +``` + # scale the computing resources of specified components, separate with commas for multiple components + kbcli cluster vscale mycluster --components=mysql --cpu=500m --memory=500Mi + + # scale the computing resources of instance template, separate with commas for multiple components + kbcli cluster vscale mycluster --components=mysql --cpu=500m --memory=500Mi --instance-tpl default +``` + +### Options + +``` + --auto-approve Skip interactive approval before vertically scaling the cluster + --components strings Component names to this operations + --cpu string Request and limit size of component cpu + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for vscale + --instance-tpl strings vertically scaling the specified instance template in the specified component + --memory string Request and limit size of component memory + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_clusterdefinition.mdx b/docs/en/release-1_0_1/cli/kbcli_clusterdefinition.mdx new file mode 100644 index 00000000..b5a28772 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_clusterdefinition.mdx @@ -0,0 +1,44 @@ +--- +title: kbcli clusterdefinition +--- + +ClusterDefinition command. 
+ +### Options + +``` + -h, --help help for clusterdefinition +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli clusterdefinition describe](kbcli_clusterdefinition_describe.md) - Describe ClusterDefinition. +* [kbcli clusterdefinition list](kbcli_clusterdefinition_list.md) - List ClusterDefinitions. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_clusterdefinition_describe.mdx b/docs/en/release-1_0_1/cli/kbcli_clusterdefinition_describe.mdx new file mode 100644 index 00000000..12da1191 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_clusterdefinition_describe.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli clusterdefinition describe +--- + +Describe ClusterDefinition. + +``` +kbcli clusterdefinition describe [flags] +``` + +### Examples + +``` + # describe a specified cluster definition + kbcli clusterdefinition describe myclusterdef +``` + +### Options + +``` + -h, --help help for describe +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli clusterdefinition](kbcli_clusterdefinition.md) - ClusterDefinition command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_clusterdefinition_list.mdx b/docs/en/release-1_0_1/cli/kbcli_clusterdefinition_list.mdx new file mode 100644 index 00000000..6a4459db --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_clusterdefinition_list.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli clusterdefinition list +--- + +List ClusterDefinitions. 
+ +``` +kbcli clusterdefinition list [flags] +``` + +### Examples + +``` + # list all ClusterDefinitions + kbcli clusterdefinition list +``` + +### Options + +``` + -h, --help help for list + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 
1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli clusterdefinition](kbcli_clusterdefinition.md) - ClusterDefinition command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_componentdefinition.mdx b/docs/en/release-1_0_1/cli/kbcli_componentdefinition.mdx new file mode 100644 index 00000000..66476b89 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_componentdefinition.mdx @@ -0,0 +1,44 @@ +--- +title: kbcli componentdefinition +--- + +ComponentDefinition command. + +### Options + +``` + -h, --help help for componentdefinition +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli componentdefinition describe](kbcli_componentdefinition_describe.md) - Describe ComponentDefinition. +* [kbcli componentdefinition list](kbcli_componentdefinition_list.md) - List ComponentDefinition. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_componentdefinition_describe.mdx b/docs/en/release-1_0_1/cli/kbcli_componentdefinition_describe.mdx new file mode 100644 index 00000000..7c191f54 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_componentdefinition_describe.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli componentdefinition describe +--- + +Describe ComponentDefinition. + +``` +kbcli componentdefinition describe [flags] +``` + +### Examples + +``` + # describe a specified component definition + kbcli componentdefinition describe mycomponentdef +``` + +### Options + +``` + -h, --help help for describe +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli componentdefinition](kbcli_componentdefinition.md) - ComponentDefinition command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_componentdefinition_list.mdx b/docs/en/release-1_0_1/cli/kbcli_componentdefinition_list.mdx new file mode 100644 index 00000000..9fa47063 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_componentdefinition_list.mdx @@ -0,0 +1,59 @@ +--- +title: kbcli componentdefinition list +--- + +List ComponentDefinition. + +``` +kbcli componentdefinition list [flags] +``` + +### Examples + +``` + # list all ComponentDefinitions + kbcli componentdefinition list + + # list all ComponentDefinitions by alias + kbcli cmpd list +``` + +### Options + +``` + -h, --help help for list + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli componentdefinition](kbcli_componentdefinition.md) - ComponentDefinition command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_componentversion.mdx b/docs/en/release-1_0_1/cli/kbcli_componentversion.mdx new file mode 100644 index 00000000..0e9c3152 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_componentversion.mdx @@ -0,0 +1,44 @@ +--- +title: kbcli componentversion +--- + +ComponentVersions command. + +### Options + +``` + -h, --help help for componentversion +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli componentversion describe](kbcli_componentversion_describe.md) - Describe ComponentVersion. +* [kbcli componentversion list](kbcli_componentversion_list.md) - List ComponentVersion. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_componentversion_describe.mdx b/docs/en/release-1_0_1/cli/kbcli_componentversion_describe.mdx new file mode 100644 index 00000000..2e65ea6f --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_componentversion_describe.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli componentversion describe +--- + +Describe ComponentVersion. + +``` +kbcli componentversion describe [flags] +``` + +### Examples + +``` + # describe a specified componentversion + kbcli componentversion describe mycomponentversion +``` + +### Options + +``` + -h, --help help for describe +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli componentversion](kbcli_componentversion.md) - ComponentVersions command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_componentversion_list.mdx b/docs/en/release-1_0_1/cli/kbcli_componentversion_list.mdx new file mode 100644 index 00000000..6bf3dec0 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_componentversion_list.mdx @@ -0,0 +1,59 @@ +--- +title: kbcli componentversion list +--- + +List ComponentVersion. + +``` +kbcli componentversion list [flags] +``` + +### Examples + +``` + # list all ComponentVersions + kbcli componentversion list + + # list all ComponentVersions by alias + kbcli cmpv list +``` + +### Options + +``` + -h, --help help for list + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli componentversion](kbcli_componentversion.md) - ComponentVersions command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection.mdx new file mode 100644 index 00000000..7d23e182 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection.mdx @@ -0,0 +1,54 @@ +--- +title: kbcli dataprotection +--- + +Data protection command. 
+ +### Options + +``` + -h, --help help for dataprotection +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli dataprotection backup](kbcli_dataprotection_backup.md) - Create a backup for the cluster. 
+* [kbcli dataprotection delete-backup](kbcli_dataprotection_delete-backup.md) - Delete a backup. +* [kbcli dataprotection describe-backup](kbcli_dataprotection_describe-backup.md) - Describe a backup +* [kbcli dataprotection describe-backup-policy](kbcli_dataprotection_describe-backup-policy.md) - Describe a backup policy +* [kbcli dataprotection describe-restore](kbcli_dataprotection_describe-restore.md) - Describe a restore +* [kbcli dataprotection edit-backup-policy](kbcli_dataprotection_edit-backup-policy.md) - Edit backup policy +* [kbcli dataprotection list-action-sets](kbcli_dataprotection_list-action-sets.md) - List actionsets +* [kbcli dataprotection list-backup-policies](kbcli_dataprotection_list-backup-policies.md) - List backup policies +* [kbcli dataprotection list-backup-policy-templates](kbcli_dataprotection_list-backup-policy-templates.md) - List backup policy templates +* [kbcli dataprotection list-backups](kbcli_dataprotection_list-backups.md) - List backups. +* [kbcli dataprotection list-restores](kbcli_dataprotection_list-restores.md) - List restores. +* [kbcli dataprotection restore](kbcli_dataprotection_restore.md) - Restore a new cluster from backup + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection_backup.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection_backup.mdx new file mode 100644 index 00000000..5608cb7d --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection_backup.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli dataprotection backup +--- + +Create a backup for the cluster. 
+ +``` +kbcli dataprotection backup NAME [flags] +``` + +### Examples + +``` + # Create a backup for the cluster, use the default backup policy and volume snapshot backup method + kbcli dp backup mybackup --cluster mycluster + + # create a backup with a specified method, run "kbcli cluster desc-backup-policy mycluster" to show supported backup methods + kbcli dp backup mybackup --cluster mycluster --method mymethod + + # create a backup with specified backup policy, run "kbcli cluster list-backup-policies mycluster" to show the cluster supported backup policies + kbcli dp backup mybackup --cluster mycluster --policy mypolicy + + # create a backup from a parent backup + kbcli dp backup mybackup --cluster mycluster --parent-backup myparentbackup +``` + +### Options + +``` + --cluster string Cluster name + --deletion-policy string Deletion policy for backup, determine whether the backup content in backup repo will be deleted after the backup is deleted, supported values: [Delete, Retain] (default "Delete") + -h, --help help for backup + --method string Backup methods are defined in backup policy (required), if only one backup method in backup policy, use it as default backup method, if multiple backup methods in backup policy, use method which volume snapshot is true as default backup method + --parent-backup string Parent backup name, used for incremental backup + --policy string Backup policy name, if not specified, use the cluster default backup policy + --retention-period string Retention period for backup, supported values: [1y, 1mo, 1d, 1h, 1m] or combine them [1y1mo1d1h1m], if not specified, the backup will not be automatically deleted, you need to manually delete it. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection_delete-backup.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection_delete-backup.mdx new file mode 100644 index 00000000..4e694c84 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection_delete-backup.mdx @@ -0,0 +1,59 @@ +--- +title: kbcli dataprotection delete-backup +--- + +Delete a backup. + +``` +kbcli dataprotection delete-backup [flags] +``` + +### Examples + +``` + # delete a backup + kbcli dp delete-backup mybackup +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + --auto-approve Skip interactive approval before deleting + --cluster string The cluster name. + --force If true, immediately remove resources from API and bypass graceful deletion. Note that immediate deletion of some resources may result in inconsistency or data loss and requires confirmation. + --grace-period int Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. Set to 1 for immediate shutdown. Can only be set to 0 when --force is true (force deletion). (default -1) + -h, --help help for delete-backup + --now If true, resources are signaled for immediate shutdown (same as --grace-period=1). + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection_describe-backup-policy.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection_describe-backup-policy.mdx new file mode 100644 index 00000000..9cc91abd --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection_describe-backup-policy.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli dataprotection describe-backup-policy +--- + +Describe a backup policy + +``` +kbcli dataprotection describe-backup-policy [flags] +``` + +### Examples + +``` + # describe the default backup policy of the cluster + kbcli dp describe-backup-policy cluster-name + + # describe the backup policy of the cluster with specified name + kbcli dp describe-backup-policy cluster-name --name backup-policy-name +``` + +### Options + +``` + -h, --help help for describe-backup-policy +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection_describe-backup.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection_describe-backup.mdx new file mode 100644 index 00000000..5fb493ac --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection_describe-backup.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli dataprotection describe-backup +--- + +Describe a backup + +``` +kbcli dataprotection describe-backup NAME [flags] +``` + +### Examples + +``` + # describe a backup + kbcli dp describe-backup mybackup +``` + +### Options + +``` + -h, --help help for describe-backup +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection_describe-restore.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection_describe-restore.mdx new file mode 100644 index 00000000..474db84a --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection_describe-restore.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli dataprotection describe-restore +--- + +Describe a restore + +``` +kbcli dataprotection describe-restore NAME [flags] +``` + +### Examples + +``` + # describe a restore + kbcli dp describe-restore +``` + +### Options + +``` + -h, --help help for describe-restore +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection_edit-backup-policy.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection_edit-backup-policy.mdx new file mode 100644 index 00000000..6a77bf01 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection_edit-backup-policy.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli dataprotection edit-backup-policy +--- + +Edit backup policy + +``` +kbcli dataprotection edit-backup-policy +``` + +### Examples + +``` + # edit backup policy + kbcli dp edit-backup-policy +``` + +### Options + +``` + -h, --help help for edit-backup-policy +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-action-sets.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-action-sets.mdx new file mode 100644 index 00000000..d0e6d556 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-action-sets.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli dataprotection list-action-sets +--- + +List actionsets + +``` +kbcli dataprotection list-action-sets [flags] +``` + +### Examples + +``` + # list all action sets + kbcli dp list-as +``` + +### Options + +``` + -h, --help help for list-action-sets + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-backup-policies.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-backup-policies.mdx new file mode 100644 index 00000000..ab169b4e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-backup-policies.mdx @@ -0,0 +1,60 @@ +--- +title: kbcli dataprotection list-backup-policies +--- + +List backup policies + +``` +kbcli dataprotection list-backup-policies [flags] +``` + +### Examples + +``` + # list all backup policies + kbcli dp list-backup-policies + + # using short cmd to list backup policy of the specified cluster + kbcli dp list-bp mycluster +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. 
+      --cluster string     The cluster name
+  -h, --help               help for list-backup-policies
+  -n, --namespace string   Specify the namespace
+  -o, --output format      prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+  -l, --selector string    Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+      --show-labels        When printing, show all labels as the last column (default hide labels column)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-backup-policy-templates.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-backup-policy-templates.mdx
new file mode 100644
index 00000000..96d3bda7
--- /dev/null
+++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-backup-policy-templates.mdx
@@ -0,0 +1,56 @@
+---
+title: kbcli dataprotection list-backup-policy-templates
+---
+
+List backup policy templates
+
+```
+kbcli dataprotection list-backup-policy-templates [flags]
+```
+
+### Examples
+
+```
+  # list all backup policy templates
+  kbcli dp list-bpt
+```
+
+### Options
+
+```
+  -h, --help              help for list-backup-policy-templates
+  -o, --output format     prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+  -l, --selector string   Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+      --show-labels       When printing, show all labels as the last column (default hide labels column)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-backups.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-backups.mdx new file mode 100644 index 00000000..5081f50e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-backups.mdx @@ -0,0 +1,60 @@ +--- +title: kbcli dataprotection list-backups +--- + +List backups. 
+
+```
+kbcli dataprotection list-backups [flags]
+```
+
+### Examples
+
+```
+  # list all backups
+  kbcli dp list-backups
+
+  # list all backups of the specified cluster
+  kbcli dp list-backups --cluster mycluster
+```
+
+### Options
+
+```
+  -A, --all-namespaces     If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
+      --cluster string     List backups in the specified cluster
+  -h, --help               help for list-backups
+  -n, --namespace string   Specify the namespace
+  -o, --output format      prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+  -l, --selector string    Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+      --show-labels        When printing, show all labels as the last column (default hide labels column)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-restores.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-restores.mdx
new file mode 100644
index 00000000..989702d6
--- /dev/null
+++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection_list-restores.mdx
@@ -0,0 +1,57 @@
+---
+title: kbcli dataprotection list-restores
+---
+
+List restores.
+
+```
+kbcli dataprotection list-restores [flags]
+```
+
+### Examples
+
+```
+  # list all restores
+  kbcli dp list-restores
+```
+
+### Options
+
+```
+  -A, --all-namespaces     If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
+      --cluster string     List restores in the specified cluster
+  -h, --help               help for list-restores
+  -n, --namespace string   Specify the namespace
+  -o, --output format      prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+  -l, --selector string    Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+ --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_dataprotection_restore.mdx b/docs/en/release-1_0_1/cli/kbcli_dataprotection_restore.mdx new file mode 100644 index 00000000..596e49b7 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_dataprotection_restore.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli dataprotection restore +--- + +Restore a new cluster from backup + +``` +kbcli dataprotection restore [flags] +``` + +### Examples + +``` + # restore a new cluster from a backup + kbcli dp restore mybackup --cluster cluster-name +``` + +### Options + +``` + --cluster string The cluster to restore + -h, --help help for restore + --restore-key string specify the key to restore in kv database, support multiple keys split by comma with wildcard pattern matching + --restore-key-ignore-errors whether or not to ignore errors when restore kv database by keys + --restore-to-time string point in time recovery(PITR) + --volume-restore-policy string the volume claim restore policy, supported values: [Serial, Parallel] (default "Parallel") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_kubeblocks.mdx b/docs/en/release-1_0_1/cli/kbcli_kubeblocks.mdx new file mode 100644 index 00000000..c3089942 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_kubeblocks.mdx @@ -0,0 +1,51 @@ +--- +title: kbcli kubeblocks +--- + +KubeBlocks operation commands. + +### Options + +``` + -h, --help help for kubeblocks +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli kubeblocks compare](kbcli_kubeblocks_compare.md) - List the changes between two different version KubeBlocks. +* [kbcli kubeblocks config](kbcli_kubeblocks_config.md) - KubeBlocks config. +* [kbcli kubeblocks describe-config](kbcli_kubeblocks_describe-config.md) - Describe KubeBlocks config. +* [kbcli kubeblocks install](kbcli_kubeblocks_install.md) - Install KubeBlocks. +* [kbcli kubeblocks list-versions](kbcli_kubeblocks_list-versions.md) - List KubeBlocks versions. 
+* [kbcli kubeblocks preflight](kbcli_kubeblocks_preflight.md) - Run and retrieve preflight checks for KubeBlocks. +* [kbcli kubeblocks status](kbcli_kubeblocks_status.md) - Show list of resource KubeBlocks uses or owns. +* [kbcli kubeblocks uninstall](kbcli_kubeblocks_uninstall.md) - Uninstall KubeBlocks. +* [kbcli kubeblocks upgrade](kbcli_kubeblocks_upgrade.md) - Upgrade KubeBlocks. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_kubeblocks_compare.mdx b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_compare.mdx new file mode 100644 index 00000000..cdc4078b --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_compare.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli kubeblocks compare +--- + +List the changes between two different versions of KubeBlocks. + +``` +kbcli kubeblocks compare version [OTHER-VERSION] [flags] +``` + +### Examples + +``` + # compare installed KubeBlocks with specified version + kbcli kubeblocks compare 0.4.0 + + # compare two specified KubeBlocks versions + kbcli kubeblocks compare 0.4.0 0.5.0 +``` + +### Options + +``` + --details show the different details between two KubeBlocks versions + -h, --help help for compare +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_kubeblocks_config.mdx b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_config.mdx new file mode 100644 index 00000000..9c601c63 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_config.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli kubeblocks config +--- + +KubeBlocks config. 
+ +``` +kbcli kubeblocks config [flags] +``` + +### Examples + +``` + # Enable the snapshot-controller and volume snapshot, to support snapshot backup. + kbcli kubeblocks config --set snapshot-controller.enabled=true +``` + +### Options + +``` + -h, --help help for config + -n, --namespace string KubeBlocks namespace + --set stringArray Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --set-file stringArray Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) + --set-json stringArray Set JSON values on the command line (can specify multiple or separate values with commas: key1=jsonval1,key2=jsonval2) + --set-string stringArray Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + -f, --values strings Specify values in a YAML file or a URL (can specify multiple) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_kubeblocks_describe-config.mdx b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_describe-config.mdx new file mode 100644 index 00000000..1e51a58e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_describe-config.mdx @@ -0,0 +1,60 @@ +--- +title: kbcli kubeblocks describe-config +--- + +Describe KubeBlocks config. + +``` +kbcli kubeblocks describe-config [flags] +``` + +### Examples + +``` + # Describe the KubeBlocks config. + kbcli kubeblocks describe-config + # Describe all the KubeBlocks configs + kbcli kubeblocks describe-config --all + # Describe the desired KubeBlocks configs by filter conditions + kbcli kubeblocks describe-config --filter=addonController,affinity +``` + +### Options + +``` + -A, --all show all kubeblocks configs value + --filter string filter the desired kubeblocks configs, multiple filtered strings are comma separated + -h, --help help for describe-config + -n, --namespace string KubeBlocks namespace + -o, --output format prints the output in the specified format. 
Allowed values: table, json, yaml, wide (default table) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_kubeblocks_install.mdx b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_install.mdx new file mode 100644 index 00000000..75a861ae --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_install.mdx @@ -0,0 +1,81 @@ +--- +title: kbcli kubeblocks install +--- + +Install KubeBlocks. + +``` +kbcli kubeblocks install [flags] +``` + +### Examples + +``` + # Install KubeBlocks, the default version is same with the kbcli version, the default namespace is kb-system + kbcli kubeblocks install + + # Install KubeBlocks with specified version + kbcli kubeblocks install --version=0.4.0 + + # Install KubeBlocks with ignoring preflight checks + kbcli kubeblocks install --force + + # Install KubeBlocks with specified namespace, if the namespace is not present, it will be created + kbcli kubeblocks install --namespace=my-namespace --create-namespace + + # Install KubeBlocks with other settings, for example, set replicaCount to 3 + kbcli kubeblocks install --set replicaCount=3 +``` + +### Options + +``` + --check Check kubernetes environment before installation (default true) + --create-namespace Create the namespace if not present + --force If present, just print fail item and continue with the following steps + -h, --help help for install + -n, --namespace string KubeBlocks namespace (default "kb-system") + --node-labels stringToString Node label selector (default []) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) + --set stringArray Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --set-file stringArray Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) + --set-json stringArray Set JSON values on the command line (can specify multiple or separate values with commas: key1=jsonval1,key2=jsonval2) + --set-string stringArray Set STRING values on the 
command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --timeout duration Time to wait for installing KubeBlocks, such as --timeout=10m (default 30m0s) + --tolerations strings Tolerations for KubeBlocks, such as '"dev=true:NoSchedule,large=true:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + -f, --values strings Specify values in a YAML file or a URL (can specify multiple) + --version string KubeBlocks version + --wait Wait for KubeBlocks to be ready, including all the auto installed add-ons. It will wait for a --timeout period (default true) + --wait-addons Wait for auto installed add-ons. It will wait for a --timeout period (default true) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request.
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_kubeblocks_list-versions.mdx b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_list-versions.mdx new file mode 100644 index 00000000..875259da --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_list-versions.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli kubeblocks list-versions +--- + +List KubeBlocks versions. + +``` +kbcli kubeblocks list-versions [flags] +``` + +### Examples + +``` + # list KubeBlocks release versions + kbcli kubeblocks list-versions + + # list KubeBlocks versions including development versions, such as alpha, beta and release candidate + kbcli kubeblocks list-versions --devel +``` + +### Options + +``` + --devel Use development versions (alpha, beta, and release candidate releases), too. Equivalent to version '>0.0.0-0'. + -h, --help help for list-versions + --limit int Maximum rows of versions to return, 0 means no limit (default 10) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_kubeblocks_preflight.mdx b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_preflight.mdx new file mode 100644 index 00000000..93abf5bc --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_preflight.mdx @@ -0,0 +1,72 @@ +--- +title: kbcli kubeblocks preflight +--- + +Run and retrieve preflight checks for KubeBlocks. 
+ +``` +kbcli kubeblocks preflight [flags] +``` + +### Examples + +``` + # Run preflight provider checks against the default rules automatically + kbcli kubeblocks preflight + + # Run preflight provider checks and output more verbose info + kbcli kubeblocks preflight --verbose + + # Run preflight checks against the customized rules of preflight-check.yaml + kbcli kubeblocks preflight preflight-check.yaml + + # Run preflight checks and display AnalyzeResults with interactive mode + kbcli kubeblocks preflight preflight-check.yaml --interactive=true +``` + +### Options + +``` + --collect-without-permissions always run preflight checks even if some required permissions that preflight does not have (default true) + --collector-image string the full name of the collector image to use + --collector-pullpolicy string the pull policy of the collector image + --debug enable debug logging + --format string output format, one of json, yaml. only used when interactive is set to false, default format is yaml (default "yaml") + -h, --help help for preflight + -n, --namespace string If present, the namespace scope for this CLI request + -o, --output string specify the output file path for the preflight checks + --selector string selector (label query) to filter remote collection nodes on. + --since string force pod logs collectors to return logs newer than a relative duration like 5s, 2m, or 3h. + --since-time string force pod logs collectors to return logs after a specific date (RFC3339) + --verbose print more verbose logs, default value is false +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_kubeblocks_status.mdx b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_status.mdx new file mode 100644 index 00000000..dce44bb0 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_status.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli kubeblocks status +--- + +Show list of resource KubeBlocks uses or owns. 
+ +``` +kbcli kubeblocks status [flags] +``` + +### Examples + +``` + # list workloads owned by KubeBlocks + kbcli kubeblocks status + + # list all resources owned by KubeBlocks, such as workloads, cluster definitions, backup template. + kbcli kubeblocks status --all +``` + +### Options + +``` + -A, --all Show all resources, including configurations, storages, etc + -h, --help help for status +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_kubeblocks_uninstall.mdx b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_uninstall.mdx new file mode 100644 index 00000000..b9f33392 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_uninstall.mdx @@ -0,0 +1,59 @@ +--- +title: kbcli kubeblocks uninstall +--- + +Uninstall KubeBlocks. + +``` +kbcli kubeblocks uninstall [flags] +``` + +### Examples + +``` + # uninstall KubeBlocks + kbcli kubeblocks uninstall +``` + +### Options + +``` + --auto-approve Skip interactive approval before uninstalling KubeBlocks + -h, --help help for uninstall + -n, --namespace string KubeBlocks namespace + --remove-namespace Remove default created "kb-system" namespace or not + --remove-pvcs Remove PersistentVolumeClaim or not + --remove-pvs Remove PersistentVolume or not + --timeout duration Time to wait for uninstalling KubeBlocks, such as --timeout=5m (default 10m0s) + --wait Wait for KubeBlocks to be uninstalled, including all the add-ons. It will wait for a --timeout period (default true) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_kubeblocks_upgrade.mdx b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_upgrade.mdx new file mode 100644 index 00000000..f31d2edf --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_kubeblocks_upgrade.mdx @@ -0,0 +1,66 @@ +--- +title: kbcli kubeblocks upgrade +--- + +Upgrade KubeBlocks. 
+ +``` +kbcli kubeblocks upgrade [flags] +``` + +### Examples + +``` + # Upgrade KubeBlocks to specified version + kbcli kubeblocks upgrade --version=0.4.0 + + # Upgrade KubeBlocks other settings, for example, set replicaCount to 3 + kbcli kubeblocks upgrade --set replicaCount=3 +``` + +### Options + +``` + --auto-approve Skip interactive approval before upgrading KubeBlocks + --check Check kubernetes environment before upgrade (default true) + -h, --help help for upgrade + -n, --namespace string KubeBlocks namespace + --set stringArray Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --set-file stringArray Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) + --set-json stringArray Set JSON values on the command line (can specify multiple or separate values with commas: key1=jsonval1,key2=jsonval2) + --set-string stringArray Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --timeout duration Time to wait for upgrading KubeBlocks, such as --timeout=10m (default 30m0s) + -f, --values strings Specify values in a YAML file or a URL (can specify multiple) + --version string Set KubeBlocks version + --wait Wait for KubeBlocks to be ready. It will wait for a --timeout period (default true) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_ops-definition.mdx b/docs/en/release-1_0_1/cli/kbcli_ops-definition.mdx new file mode 100644 index 00000000..a7b92725 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_ops-definition.mdx @@ -0,0 +1,44 @@ +--- +title: kbcli ops-definition +--- + +ops-definitions command. 
+ +### Options + +``` + -h, --help help for ops-definition +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli ops-definition describe](kbcli_ops-definition_describe.md) - Describe OpsDefinition. 
+* [kbcli ops-definition list](kbcli_ops-definition_list.md) - List OpsDefinition. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_ops-definition_describe.mdx b/docs/en/release-1_0_1/cli/kbcli_ops-definition_describe.mdx new file mode 100644 index 00000000..f61dcc33 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_ops-definition_describe.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli ops-definition describe +--- + +Describe OpsDefinition. + +``` +kbcli ops-definition describe [flags] +``` + +### Examples + +``` + # describe a specified ops-definition + kbcli ops-definition describe my-ops-definition +``` + +### Options + +``` + -h, --help help for describe +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli ops-definition](kbcli_ops-definition.md) - ops-definitions command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/en/release-1_0_1/cli/kbcli_ops-definition_list.mdx b/docs/en/release-1_0_1/cli/kbcli_ops-definition_list.mdx
new file mode 100644
index 00000000..831bad01
--- /dev/null
+++ b/docs/en/release-1_0_1/cli/kbcli_ops-definition_list.mdx
@@ -0,0 +1,60 @@
+---
+title: kbcli ops-definition list
+---
+
+List OpsDefinition.
+
+```
+kbcli ops-definition list [flags]
+```
+
+### Examples
+
+```
+ # list all ops-definitions
+ kbcli ops-definition list
+
+ # list all ops-definitions by alias
+ kbcli ops-def list
+```
+
+### Options
+
+```
+ -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
+ -h, --help help for list
+ -n, --namespace string Specify the namespace
+ -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+ -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+ --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli ops-definition](kbcli_ops-definition.md) - ops-definitions command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_options.mdx b/docs/en/release-1_0_1/cli/kbcli_options.mdx new file mode 100644 index 00000000..38a4a122 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_options.mdx @@ -0,0 +1,54 @@ +--- +title: kbcli options +--- + +Print the list of flags inherited by all commands. + +``` +kbcli options [flags] +``` + +### Examples + +``` + + # Print flags inherited by all commands + kbcli options +``` + +### Options + +``` + -h, --help help for options +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_playground.mdx b/docs/en/release-1_0_1/cli/kbcli_playground.mdx new file mode 100644 index 00000000..c6414a79 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_playground.mdx @@ -0,0 +1,44 @@ +--- +title: kbcli playground +--- + +Bootstrap or destroy a playground KubeBlocks in local host or cloud. + +### Options + +``` + -h, --help help for playground +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli playground destroy](kbcli_playground_destroy.md) - Destroy the playground KubeBlocks and kubernetes cluster. +* [kbcli playground init](kbcli_playground_init.md) - Bootstrap a kubernetes cluster and install KubeBlocks for playground. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_playground_destroy.mdx b/docs/en/release-1_0_1/cli/kbcli_playground_destroy.mdx new file mode 100644 index 00000000..9f9f78d1 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_playground_destroy.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli playground destroy +--- + +Destroy the playground KubeBlocks and kubernetes cluster. + +``` +kbcli playground destroy [flags] +``` + +### Examples + +``` + # destroy playground cluster + kbcli playground destroy +``` + +### Options + +``` + --auto-approve Skip interactive approval before destroying the playground + -h, --help help for destroy + --purge Purge all resources before destroying kubernetes cluster, delete all clusters created by KubeBlocks and uninstall KubeBlocks. 
(default true) + --timeout duration Time to wait for destroying KubeBlocks, such as --timeout=10m (default 10m0s) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli playground](kbcli_playground.md) - Bootstrap or destroy a playground KubeBlocks in local host or cloud. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_playground_init.mdx b/docs/en/release-1_0_1/cli/kbcli_playground_init.mdx new file mode 100644 index 00000000..9e2924b2 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_playground_init.mdx @@ -0,0 +1,87 @@ +--- +title: kbcli playground init +--- + +Bootstrap a kubernetes cluster and install KubeBlocks for playground. + +### Synopsis + +Bootstrap a kubernetes cluster and install KubeBlocks for playground. + + If no cloud provider is specified, a k3d cluster named kb-playground will be created on local host, otherwise a kubernetes cluster will be created on the specified cloud. Then KubeBlocks will be installed on the created kubernetes cluster, and an apecloud-mysql cluster named mycluster will be created. 
+ +``` +kbcli playground init [flags] +``` + +### Examples + +``` + # create a k3d cluster on local host and install KubeBlocks + kbcli playground init + + # create an AWS EKS cluster and install KubeBlocks, the region is required + kbcli playground init --cloud-provider aws --region us-west-1 + + # after init, run the following commands to experience KubeBlocks quickly + # list database cluster and check its status + kbcli cluster list + + # get cluster information + kbcli cluster describe mycluster + + # connect to database + kbcli exec -it mycluster-mysql-0 bash + mysql -h 127.1 -u root -p$MYSQL_ROOT_PASSWORD + + # view the Grafana + kbcli dashboard open kubeblocks-grafana + + # destroy playground + kbcli playground destroy +``` + +### Options + +``` + --auto-approve Skip interactive approval during the initialization of playground + --cloud-provider string Cloud provider type, one of [local aws] (default "local") + --cluster-type string Specify the cluster type to create, use 'kbcli cluster create --help' to get the available cluster type. (default "mysql") + -h, --help help for init + --k3d-proxy-image string Specify k3d proxy image if you want to init playground locally (default "docker.io/apecloud/k3d-proxy:5.4.4") + --k3s-image string Specify k3s image that you want to use for the nodes if you want to init playground locally (default "rancher/k3s:v1.23.8-k3s1") + --region string The region to create kubernetes cluster + --timeout duration Time to wait for init playground, such as --timeout=10m (default 10m0s) + --version string KubeBlocks version +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli playground](kbcli_playground.md) - Bootstrap or destroy a playground KubeBlocks in local host or cloud. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_plugin.mdx b/docs/en/release-1_0_1/cli/kbcli_plugin.mdx new file mode 100644 index 00000000..9d132e7e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_plugin.mdx @@ -0,0 +1,55 @@ +--- +title: kbcli plugin +--- + +Provides utilities for interacting with plugins. 
+ +### Synopsis + +Provides utilities for interacting with plugins. + + Plugins provide extended functionality that is not part of the major command-line distribution. + +### Options + +``` + -h, --help help for plugin +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli plugin describe](kbcli_plugin_describe.md) - Describe a plugin +* [kbcli plugin index](kbcli_plugin_index.md) - Manage custom plugin indexes +* [kbcli plugin install](kbcli_plugin_install.md) - Install kbcli or kubectl plugins +* [kbcli plugin list](kbcli_plugin_list.md) - List all visible plugin executables on a user's PATH +* [kbcli plugin search](kbcli_plugin_search.md) - Search kbcli or kubectl plugins +* [kbcli plugin uninstall](kbcli_plugin_uninstall.md) - Uninstall kbcli or kubectl plugins +* [kbcli plugin upgrade](kbcli_plugin_upgrade.md) - Upgrade kbcli or kubectl plugins + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_plugin_describe.mdx b/docs/en/release-1_0_1/cli/kbcli_plugin_describe.mdx new file mode 100644 index 00000000..8d57c2fd --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_plugin_describe.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli plugin describe +--- + +Describe a plugin + +``` +kbcli plugin describe [flags] +``` + +### Examples + +``` + # Describe a plugin + kbcli plugin describe [PLUGIN] + + # Describe a plugin with index + kbcli plugin describe [INDEX/PLUGIN] +``` + +### Options + +``` + -h, --help help for describe +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_plugin_index.mdx b/docs/en/release-1_0_1/cli/kbcli_plugin_index.mdx new file mode 100644 index 00000000..3a404811 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_plugin_index.mdx @@ -0,0 +1,50 @@ +--- +title: kbcli plugin index +--- + +Manage custom plugin indexes + +### Synopsis + +Manage which repositories are used to discover plugins and install plugins from + +### Options + +``` + -h, --help help for index +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. +* [kbcli plugin index add](kbcli_plugin_index_add.md) - Add a new index +* [kbcli plugin index delete](kbcli_plugin_index_delete.md) - Remove a configured index +* [kbcli plugin index list](kbcli_plugin_index_list.md) - List configured indexes +* [kbcli plugin index update](kbcli_plugin_index_update.md) - update all configured indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_plugin_index_add.mdx b/docs/en/release-1_0_1/cli/kbcli_plugin_index_add.mdx new file mode 100644 index 00000000..66ae11dc --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_plugin_index_add.mdx @@ -0,0 +1,55 @@ +--- +title: kbcli plugin index add +--- + +Add a new index + +``` +kbcli plugin index add [flags] +``` + +### Examples + +``` + # Add a new plugin index + kbcli plugin index add default https://github.com/apecloud/block-index.git + + kbcli plugin index add krew https://github.com/kubernetes-sigs/krew-index.git +``` + +### Options + +``` + -h, --help help for add +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin index](kbcli_plugin_index.md) - Manage custom plugin indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_plugin_index_delete.mdx b/docs/en/release-1_0_1/cli/kbcli_plugin_index_delete.mdx new file mode 100644 index 00000000..47c79055 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_plugin_index_delete.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli plugin index delete +--- + +Remove a configured index + +``` +kbcli plugin index delete [flags] +``` + +### Examples + +``` + # Delete a plugin index + kbcli plugin index delete myIndex +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin index](kbcli_plugin_index.md) - Manage custom plugin indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_plugin_index_list.mdx b/docs/en/release-1_0_1/cli/kbcli_plugin_index_list.mdx new file mode 100644 index 00000000..1931e4d0 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_plugin_index_list.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli plugin index list +--- + +List configured indexes + +``` +kbcli plugin index list [flags] +``` + +### Examples + +``` + # List all configured plugin indexes + kbcli plugin index list +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin index](kbcli_plugin_index.md) - Manage custom plugin indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_plugin_index_update.mdx b/docs/en/release-1_0_1/cli/kbcli_plugin_index_update.mdx new file mode 100644 index 00000000..0ab0caaf --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_plugin_index_update.mdx @@ -0,0 +1,46 @@ +--- +title: kbcli plugin index update +--- + +update all configured indexes + +``` +kbcli plugin index update [flags] +``` + +### Options + +``` + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin index](kbcli_plugin_index.md) - Manage custom plugin indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_plugin_install.mdx b/docs/en/release-1_0_1/cli/kbcli_plugin_install.mdx new file mode 100644 index 00000000..54933733 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_plugin_install.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli plugin install +--- + +Install kbcli or kubectl plugins + +``` +kbcli plugin install [flags] +``` + +### Examples + +``` + # install a kbcli or kubectl plugin by name + kbcli plugin install [PLUGIN] + + # install a kbcli or kubectl plugin by name and index + kbcli plugin install [INDEX/PLUGIN] +``` + +### Options + +``` + -h, --help help for install +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. 
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_plugin_list.mdx b/docs/en/release-1_0_1/cli/kbcli_plugin_list.mdx new file mode 100644 index 00000000..945ec144 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_plugin_list.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli plugin list +--- + +List all visible plugin executables on a user's PATH + +``` +kbcli plugin list +``` + +### Examples + +``` + # List all available plugins file on a user's PATH. + kbcli plugin list +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_plugin_search.mdx b/docs/en/release-1_0_1/cli/kbcli_plugin_search.mdx new file mode 100644 index 00000000..d294f743 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_plugin_search.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli plugin search +--- + +Search kbcli or kubectl plugins + +### Synopsis + +Search kbcli or kubectl plugins by keywords + +``` +kbcli plugin search [flags] +``` + +### Examples + +``` + # search a kbcli or kubectl plugin with keywords + kbcli plugin search keyword1 keyword2 +``` + +### Options + +``` + -h, --help help for search + --limit int Limit the number of plugin descriptions to output (default 50) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_plugin_uninstall.mdx b/docs/en/release-1_0_1/cli/kbcli_plugin_uninstall.mdx new file mode 100644 index 00000000..f96801fa --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_plugin_uninstall.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli plugin uninstall +--- + +Uninstall kbcli or kubectl plugins + +``` +kbcli plugin uninstall [flags] +``` + +### Examples + +``` + # uninstall a kbcli or kubectl plugin by name + kbcli plugin uninstall [PLUGIN] +``` + +### Options + +``` + -h, --help help for uninstall +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_plugin_upgrade.mdx b/docs/en/release-1_0_1/cli/kbcli_plugin_upgrade.mdx new file mode 100644 index 00000000..3ee5e590 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_plugin_upgrade.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli plugin upgrade +--- + +Upgrade kbcli or kubectl plugins + +``` +kbcli plugin upgrade [flags] +``` + +### Examples + +``` + # upgrade an installed plugin with the specified name + kbcli plugin upgrade myplugin + + # upgrade all installed plugins + kbcli plugin upgrade --all +``` + +### Options + +``` + --all Upgrade all installed plugins + -h, --help help for upgrade +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request.
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_report.mdx b/docs/en/release-1_0_1/cli/kbcli_report.mdx new file mode 100644 index 00000000..ca61eab7 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_report.mdx @@ -0,0 +1,44 @@ +--- +title: kbcli report +--- + +Report kubeblocks or cluster info. + +### Options + +``` + -h, --help help for report +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli report cluster](kbcli_report_cluster.md) - Report Cluster information +* [kbcli report kubeblocks](kbcli_report_kubeblocks.md) - Report KubeBlocks information, including deployments, events, logs, etc. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_report_cluster.mdx b/docs/en/release-1_0_1/cli/kbcli_report_cluster.mdx new file mode 100644 index 00000000..773d6e1e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_report_cluster.mdx @@ -0,0 +1,79 @@ +--- +title: kbcli report cluster +--- + +Report Cluster information + +``` +kbcli report cluster NAME [-f file] [-with-logs] [-mask] [flags] +``` + +### Examples + +``` + # report KubeBlocks status + kbcli report cluster mycluster + + # report KubeBlocks cluster information to file + kbcli report cluster mycluster -f filename + + # report KubeBlocks cluster information with logs + kbcli report cluster mycluster --with-logs + + # report KubeBlocks cluster information with logs and mask sensitive info + kbcli report cluster mycluster --with-logs --mask + + # report KubeBlocks cluster information with logs since 1 hour ago + kbcli report cluster mycluster --with-logs --since 1h + + # report KubeBlocks cluster information with logs since given time + kbcli report cluster mycluster --with-logs --since-time 2023-05-23T00:00:00Z + + # report KubeBlocks cluster information with logs for all containers + kbcli report cluster mycluster --with-logs --all-containers +``` + +### Options + +``` + --all-containers Get all containers' logs in the pod(s). By default, only the main container (the first container) will have logs recorded. + -f, --file string zip file for output + -h, --help help for cluster + --mask mask sensitive info for secrets and configmaps (default true) + -n, --namespace string KubeBlocks namespace + -o, --output string Output format. One of: json|yaml. (default "json") + --since duration Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of since-time / since may be used. + --since-time string Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of since-time / since may be used.
+ --with-logs include pod logs + --with-secrets include secrets +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli report](kbcli_report.md) - Report kubeblocks or cluster info. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_report_kubeblocks.mdx b/docs/en/release-1_0_1/cli/kbcli_report_kubeblocks.mdx new file mode 100644 index 00000000..c49e0d73 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_report_kubeblocks.mdx @@ -0,0 +1,70 @@ +--- +title: kbcli report kubeblocks +--- + +Report KubeBlocks information, including deployments, events, logs, etc. + +``` +kbcli report kubeblocks [-f file] [--with-logs] [--mask] [flags] +``` + +### Examples + +``` + # report KubeBlocks status + kbcli report kubeblocks + + # report KubeBlocks information to file + kbcli report kubeblocks -f filename + + # report KubeBlocks information with logs + kbcli report kubeblocks --with-logs + + # report KubeBlocks information with logs and mask sensitive info + kbcli report kubeblocks --with-logs --mask +``` + +### Options + +``` + --all-containers Get all containers' logs in the pod(s). By default, only the main container (the first container) will have logs recorded. + -f, --file string zip file for output + -h, --help help for kubeblocks + --mask mask sensitive info for secrets and configmaps (default true) + -n, --namespace string KubeBlocks namespace + -o, --output string Output format. One of: json|yaml. (default "json") + --since duration Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of since-time / since may be used. + --since-time string Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of since-time / since may be used. + --with-logs include pod logs + --with-secrets include secrets +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli report](kbcli_report.md) - Report kubeblocks or cluster info. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_trace.mdx b/docs/en/release-1_0_1/cli/kbcli_trace.mdx new file mode 100644 index 00000000..8d4ad716 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_trace.mdx @@ -0,0 +1,47 @@ +--- +title: kbcli trace +--- + +trace management command + +### Options + +``` + -h, --help help for trace +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. 
User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli trace create](kbcli_trace_create.md) - create a trace. +* [kbcli trace delete](kbcli_trace_delete.md) - Delete a trace. +* [kbcli trace list](kbcli_trace_list.md) - list all traces. +* [kbcli trace update](kbcli_trace_update.md) - update a trace. 
+* [kbcli trace watch](kbcli_trace_watch.md) - watch a trace. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_trace_create.mdx b/docs/en/release-1_0_1/cli/kbcli_trace_create.mdx new file mode 100644 index 00000000..04ebcd28 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_trace_create.mdx @@ -0,0 +1,63 @@ +--- +title: kbcli trace create +--- + +create a trace. + +``` +kbcli trace create trace-name [flags] +``` + +### Examples + +``` + # create a trace for cluster has the same name 'pg-cluster' + kbcli trace create pg-cluster + + # create a trace for cluster has the name of 'pg-cluster' + kbcli trace create pg-cluster-trace --cluster-name pg-cluster + + # create a trace with custom locale, stateEvaluationExpression + kbcli trace create pg-cluster-trace --locale zh_cn --cel-state-evaluation-expression "has(object.status.phase) && object.status.phase == \"Running\"" +``` + +### Options + +``` + --cel-state-evaluation-expression string Specify CEL state evaluation expression. + --cluster-name string Specify target cluster name. + --depth int Specify object tree depth to display. + -h, --help help for create + --locale string Specify locale. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli trace](kbcli_trace.md) - trace management command + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_trace_delete.mdx b/docs/en/release-1_0_1/cli/kbcli_trace_delete.mdx new file mode 100644 index 00000000..262c1701 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_trace_delete.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli trace delete +--- + +Delete a trace. 
+ +``` +kbcli trace delete trace-name [flags] +``` + +### Examples + +``` + # Delete a trace + kbcli trace delete pg-cluster +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli trace](kbcli_trace.md) - trace management command + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_trace_list.mdx b/docs/en/release-1_0_1/cli/kbcli_trace_list.mdx new file mode 100644 index 00000000..b1c42c02 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_trace_list.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli trace list +--- + +list all traces. + +``` +kbcli trace list [flags] +``` + +### Examples + +``` + # list all traces + kbcli trace list +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli trace](kbcli_trace.md) - trace management command + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_trace_update.mdx b/docs/en/release-1_0_1/cli/kbcli_trace_update.mdx new file mode 100644 index 00000000..53ba1c71 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_trace_update.mdx @@ -0,0 +1,62 @@ +--- +title: kbcli trace update +--- + +update a trace. 
+ +``` +kbcli trace update trace-name [flags] +``` + +### Examples + +``` + # update a trace with custom locale, stateEvaluationExpression + kbcli trace update pg-cluster-trace --locale zh_cn --cel-state-evaluation-expression "has(object.status.phase) && object.status.phase == \"Running\"" +``` + +### Options + +``` + --allow-missing-template-keys If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. (default true) + --cel-state-evaluation-expression string Specify CEL state evaluation expression. + --depth int Specify object tree depth to display. (default -1) + --dry-run string[="unchanged"] Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit server-side request without persisting the resource. (default "none") + --edit Edit the API resource + -h, --help help for update + --locale string Specify locale. + -o, --output string Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file). + --show-managed-fields If true, keep the managedFields when printing objects in JSON or YAML format. + --template string Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli trace](kbcli_trace.md) - trace management command + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_trace_watch.mdx b/docs/en/release-1_0_1/cli/kbcli_trace_watch.mdx new file mode 100644 index 00000000..e1d35f1e --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_trace_watch.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli trace watch +--- + +watch a trace. 
+ +``` +kbcli trace watch trace-name [flags] +``` + +### Examples + +``` + # watch a trace + kbcli trace watch pg-cluster-trace +``` + +### Options + +``` + -h, --help help for watch +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli trace](kbcli_trace.md) - trace management command + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_version.mdx b/docs/en/release-1_0_1/cli/kbcli_version.mdx new file mode 100644 index 00000000..29e1e7f7 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_version.mdx @@ -0,0 +1,47 @@ +--- +title: kbcli version +--- + +Print the version information, include kubernetes, KubeBlocks and kbcli version. + +``` +kbcli version [flags] +``` + +### Options + +``` + -h, --help help for version + --verbose print detailed kbcli information +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/01-overview.mdx b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/01-overview.mdx new file mode 100644 index 00000000..fe2c3fc7 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/01-overview.mdx @@ -0,0 +1,66 @@ +--- +title: Overview of KubeBlocks Elasticsearch Addon +description: Learn about the features and capabilities of the KubeBlocks Elasticsearch addon, including deployment topologies, lifecycle management, backup and restore, and supported versions. +keywords: [Elasticsearch, KubeBlocks, operator, database, features, lifecycle management, backup, restore] +sidebar_position: 1 +sidebar_label: Overview +--- + +# Overview of KubeBlocks Elasticsearch Addon + +## Overview + +Elasticsearch is a distributed, RESTful search engine optimized for speed and relevance on production-scale workloads. Each Elasticsearch cluster consists of one or more nodes, with each node assuming specific roles. 
+ +### Node Roles + +| Role | Description | +|------|-------------| +| **master** | Manages cluster state and coordinates operations | +| **data** | Stores data and handles data-related operations | +| **data_content** | Stores document data | +| **data_hot** | Handles recent, frequently accessed data | +| **data_warm** | Stores less frequently accessed data | +| **data_cold** | Handles rarely accessed data | +| **data_frozen** | Manages archived data | +| **ingest** | Processes documents before indexing | +| **ml** | Runs machine learning jobs | +| **remote_cluster_client** | Connects to remote clusters | +| **transform** | Handles data transformations | + +[See Elasticsearch Node Roles documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) + +## Key features + +### Lifecycle Management + +KubeBlocks simplifies Elasticsearch operations with comprehensive lifecycle management: + +| Feature | Description | +|------------------------------|-----------------------------------------------------------------------------| +| **Horizontal Scaling** | Scale replicas in/out to adjust capacity | +| **Vertical Scaling** | Adjust CPU/memory resources for Elasticsearch instances | +| **Volume Expansion** | Dynamically increase storage capacity without downtime | +| **Restart Operations** | Controlled cluster restarts with minimal disruption | +| **Start/Stop** | Temporarily suspend/resume cluster operations | +| **Custom Services** | Expose specialized database endpoints | +| **Replica Management** | Safely decommission or rebuild specific replicas | +| **Version Upgrades** | Perform minor version upgrades seamlessly | +| **Advanced Scheduling** | Customize pod placement and resource allocation | +| **Monitoring** | Integrated Prometheus metrics collection | +| **Logging** | Centralized logs via Loki Stack | + +### Supported Versions + +KubeBlocks Elasticsearch Addon supports these Elasticsearch versions: + +| Major Version | Supported 
Minor Versions |
+|---------------|--------------------------------|
+| 7.x | 7.7.1,7.8.1,7.10.1 |
+| 8.x | 8.1.3, 8.8.2 |
+
+
+The list of supported versions can be found by the following command:
+```
+kubectl get cmpv elasticsearch
+```
diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/02-quickstart.mdx b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/02-quickstart.mdx
new file mode 100644
index 00000000..b401cb43
--- /dev/null
+++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/02-quickstart.mdx
@@ -0,0 +1,325 @@
+---
+title: Elasticsearch Quickstart
+description: Comprehensive guide to deploying and managing Elasticsearch ReplicaSet Clusters with KubeBlocks, including installation, configuration, and operational best practices, an alternative to dedicated operator.
+keywords: [Kubernetes Operator, Elasticsearch, KubeBlocks, Helm, Cluster Management, QuickStart]
+sidebar_position: 2
+sidebar_label: Quickstart
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Elasticsearch Quickstart
+
+This guide provides a comprehensive walkthrough for deploying and managing Elasticsearch ReplicaSet Clusters using the **KubeBlocks Elasticsearch Add-on**, covering:
+- System prerequisites and add-on installation
+- Cluster creation and configuration
+- Operational management including start/stop procedures
+- Connection methods and cluster monitoring
+
+## Prerequisites
+
+### System Requirements
+
+Before proceeding, verify your environment meets these requirements:
+
+- A functional Kubernetes cluster (v1.21+ recommended)
+- `kubectl` v1.21+ installed and configured with cluster access
+- Helm installed ([installation guide](https://helm.sh/docs/intro/install/))
+- KubeBlocks installed ([installation guide](../user_docs/overview/install-kubeblocks))
+
+### Verify Elasticsearch Add-on
+
+The Elasticsearch Add-on is included with KubeBlocks by default. 
Check its status: + +```bash +helm list -n kb-system | grep elasticsearch +``` + +
+Example Output: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-elasticsearch kb-system 1 2025-05-21 deployed elasticsearch-1.0.0 +``` +
+ +If the add-on isn't enabled, choose an installation method: + + + + + ```bash + # Add Helm repo + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # For users in Mainland China, if GitHub is inaccessible or slow, use this alternative repo: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # Update helm repo + helm repo update + # Search available Add-on versions + helm search repo kubeblocks/elasticsearch --versions + # Install your desired version (replace with your chosen version) + helm upgrade -i kb-addon-elasticsearch kubeblocks-addons/elasticsearch --version -n kb-system + ``` + + + + + ```bash + # Add an index (kubeblocks is added by default) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # Update the index + kbcli addon index update kubeblocks + # Update all indexes + kbcli addon index update --all + ``` + + To search and install an addon: + + ```bash + # Search Add-on + kbcli addon search elasticsearch + # Install Add-on with your desired version (replace with your chosen version) + kbcli addon install elasticsearch --version + ``` + **Example Output:** + ```bash + ADDON VERSION INDEX + elasticsearch 0.9.0 kubeblocks + elasticsearch 0.9.1 kubeblocks + elasticsearch 1.0.0 kubeblocks + ``` + To enable or disable an addon: + + ```bash + # Enable Add-on + kbcli addon enable elasticsearch + # Disable Add-on + kbcli addon disable elasticsearch + ``` + + + + +:::note +**Version Compatibility** + +Always verify that the Elasticsearch Add-on version matches your KubeBlocks major version to avoid compatibility issues. + +::: + +## Deploy a Elasticsearch Cluster + +For development and testing purposes, you can deploy a single-node cluster where one node handles all roles. 
+ +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/elasticsearch/cluster-single-node.yaml +``` + +This creates: +- A Elasticsearch Cluster with 1 component, where one replica handles all roles. +- Default resource allocations (1 CPU, 2Gi memory) +- 20Gi persistent storage + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: es-singlenode + namespace: demo +spec: + terminationPolicy: Delete + componentSpecs: + - name: mdit + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + replicas: 1 + configs: + - name: es-cm + variables: + mode: "single-node" + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "1" + memory: "2Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +For more API fields and descriptions, refer to the [API Reference](../user_docs/references/api-reference/cluster). + +## Verify Cluster Status + +When deploying a Elasticsearch Cluster with 1 replicas: + +Confirm successful deployment by checking: + +1. Cluster phase is `Running` +2. 
All pods are operational + +Check status using either method: + + + +```bash +kubectl get cluster es-singlenode -n demo -w +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +es-singlenode Delete Running 49s + +kubectl get pods -l app.kubernetes.io/instance=es-singlenode -n demo +NAME READY STATUS RESTARTS AGE +es-singlenode-mdit-0 3/3 Running 0 58s +``` + + + + + With `kbcli` installed, you can view comprehensive cluster information: + +```bash +kbcli cluster describe es-singlenode -n demo + +Name: es-singlenode Created Time: May 19,2025 20:34 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL +mdit es-singlenode-mdit-http.demo.svc.cluster.local:9200 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mdit 8.8.2 es-singlenode-mdit-0 Running kbv10-control-plane/172.19.0.2 May 19,2025 20:34 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mdit 1 / 1 2Gi / 2Gi data:20Gi standard + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +mdit elasticsearch-8-1.0.0 docker.io/library/elasticsearch:8.8.2 + docker.io/prometheuscommunity/elasticsearch-exporter:v1.7.0 + docker.io/apecloud/curl-jq:0.1.0 + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +Show cluster events: kbcli cluster list-events -n demo es-singlenode +``` + + + + + +## Stop the Elasticsearch Cluster + +Stopping a cluster temporarily suspends operations while preserving all data and configuration: + +**Key Effects:** +- Compute resources (Pods) are released +- Persistent storage (PVCs) remains intact +- Service definitions are maintained +- Cluster configuration is preserved +- Operational costs are reduced + + + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: elasticsearch-stop + namespace: demo + spec: 
+ clusterName: es-singlenode + type: Stop + ``` + + + + Alternatively, stop by setting `spec.componentSpecs.stop` to true: + + ```bash + kubectl patch cluster es-singlenode -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + } + ``` + + + +## Start the Elasticsearch Cluster + +Restarting a stopped cluster resumes operations with all data and configuration intact. + +**Key Effects:** +- Compute resources (Pods) are recreated +- Services become available again +- Cluster returns to previous state + + + + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: elasticsearch-start + namespace: demo + spec: + clusterName: es-singlenode + type: Start + ``` + + + + Restart by setting `spec.componentSpecs.stop` to false: + + ```bash + kubectl patch cluster es-singlenode -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } + ``` + + + + +## Delete Elasticsearch Cluster + +Choose carefully based on your data retention needs: + +| Policy | Resources Removed | Data Removed | Recommended For | +|-----------------|-------------------|--------------|-----------------| +| DoNotTerminate | None | None | Critical production clusters | +| Delete | All resources | PVCs deleted | Non-critical environments | +| WipeOut | All resources | Everything* | Test environments only | + +*Includes snapshots and backups in external storage + +**Pre-Deletion Checklist:** +1. Verify no applications are using the cluster +2. Ensure required backups exist +3. Confirm proper terminationPolicy is set +4. 
Check for dependent resources + +For test environments, use this complete cleanup: + +```bash +kubectl patch cluster es-singlenode -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster es-singlenode -n demo +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/01-stop-start-restart.mdx b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..a589d932 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,307 @@ +--- +title: Elasticsearch Cluster Lifecycle Management (Stop, Start, Restart) +description: Learn how to manage Elasticsearch Cluster states in KubeBlocks including stopping, starting, and restarting operations to optimize resources. +keywords: [KubeBlocks, Elasticsearch, Cluster Management, Stop, Start, Restart] +sidebar_position: 1 +sidebar_label: Lifecycle Management +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Elasticsearch Cluster Lifecycle Management + +This guide demonstrates how to manage a Elasticsearch Cluster's operational state in **KubeBlocks**, including: + +- Stopping the cluster to conserve resources +- Starting a stopped cluster +- Restarting cluster components + +These operations help optimize resource usage and reduce operational costs in Kubernetes environments. 
+ +Lifecycle management operations in KubeBlocks: + +| Operation | Effect | Use Case | +|-----------|--------|----------| +| Stop | Suspends cluster, retains storage | Cost savings, maintenance | +| Start | Resumes cluster operation | Restore service after pause | +| Restart | Recreates pods for component | Configuration changes, troubleshooting | + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Elasticsearch Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Cluster Lifecycle Operations + +### Stopping the Cluster + +Stopping a Elasticsearch Cluster in KubeBlocks will: + +1. Terminates all running pods +2. Retains persistent storage (PVCs) +3. Maintains cluster configuration + +This operation is ideal for: +- Temporary cost savings +- Maintenance windows +- Development environment pauses + + + + + + Option 1: OpsRequest API + + Create a Stop operation request: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-stop-ops + namespace: demo + spec: + clusterName: es-multinode + type: Stop + ``` + + + + + Option 2: Cluster API Patch + + Modify the cluster spec directly by patching the stop field: + + ```bash + kubectl patch cluster es-multinode -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/1/stop", + "value": true + } + ]' + ``` + + + + + +### Verifying Cluster Stop + +To confirm a successful stop operation: + +1. Check cluster status transition: + ```bash + kubectl get cluster es-multinode -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + es-multinode Delete Stopping 8m6s + es-multinode Delete Stopped 9m41s + ``` + +2. 
Verify no running pods: + ```bash + kubectl get pods -l app.kubernetes.io/instance=es-multinode -n demo + ``` + Example Output: + ```bash + No resources found in demo namespace. + ``` + +3. Confirm persistent volumes remain: + ```bash + kubectl get pvc -l app.kubernetes.io/instance=es-multinode -n demo + ``` + Example Output: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE + data-es-multinode-dit-0 Bound pvc-aa8136e5-a69a-4117-bb4c-8978978bb77f 20Gi RWO standard 8m25s + data-es-multinode-dit-1 Bound pvc-408fe4d5-b3a9-4984-b6e5-48ec133307eb 20Gi RWO standard 8m25s + data-es-multinode-dit-2 Bound pvc-cf6c3c7c-bb5f-4fa6-8dff-33e0862f8ef9 20Gi RWO standard 8m25s + data-es-multinode-master-0 Bound pvc-5793e794-8c91-4bba-b6e8-52c414ec0ade 20Gi RWO standard 8m25s + data-es-multinode-master-1 Bound pvc-044dae8d-82ee-41f3-867d-c8f27ec08fbe 20Gi RWO standard 8m25s + data-es-multinode-master-2 Bound pvc-2af7cedb-2f5f-4846-be43-ff6da8109880 20Gi RWO standard 8m25s + ``` +### Starting the Cluster + +Starting a stopped Elasticsearch Cluster: +1. Recreates all pods +2. Reattaches persistent storage +3. Restores service endpoints + +Expected behavior: +- Cluster returns to previous state +- No data loss occurs +- Services resume automatically + + + + + Initiate a Start operation request: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-start-ops + namespace: demo + spec: + # Specifies the name of the Cluster resource that this operation is targeting. + clusterName: es-multinode + type: Start + ``` + + + + + + Modify the cluster spec to resume operation: + 1. Set stop: false, or + 2. 
Remove the stop field entirely

+  ```bash
+  kubectl patch cluster es-multinode -n demo --type='json' -p='[
+    {
+      "op": "remove",
+      "path": "/spec/componentSpecs/0/stop"
+    },
+    {
+      "op": "remove",
+      "path": "/spec/componentSpecs/1/stop"
+    }
+  ]'
+  ```
+
+  </TabItem>
+
+</Tabs>
+
+
+### Verifying Cluster Start
+
+To confirm a successful start operation:
+
+1. Check cluster status transition:
+   ```bash
+   kubectl get cluster es-multinode -n demo -w
+   ```
+   Example Output:
+   ```bash
+   NAME           CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS     AGE
+   es-multinode                        Delete               Updating   24m
+   es-multinode                        Delete               Running    24m
+   es-multinode                        Delete               Running    24m
+   ```
+
+2. Verify pod recreation:
+   ```bash
+   kubectl get pods -n demo -l app.kubernetes.io/instance=es-multinode
+   ```
+   Example Output:
+   ```bash
+   NAME                    READY   STATUS    RESTARTS   AGE
+   es-multinode-dit-0      3/3     Running   0          24m
+   es-multinode-dit-1      3/3     Running   0          24m
+   es-multinode-dit-2      3/3     Running   0          24m
+   es-multinode-master-0   3/3     Running   0          24m
+   es-multinode-master-1   3/3     Running   0          24m
+   es-multinode-master-2   3/3     Running   0          24m
+   ```
+
+### Restarting Cluster
+
+Restart operations provide:
+- Pod recreation without full cluster stop
+- Component-level granularity
+- Minimal service disruption
+
+Use cases:
+- Configuration changes requiring restart
+- Resource refresh
+- Troubleshooting
+
+**Check Components**
+
+There are two components in the Elasticsearch Cluster. To get the list of components,
+```bash
+kubectl get cluster -n demo es-multinode -oyaml | yq '.spec.componentSpecs[].name'
+```
+
+Expected Output:
+```text
+dit
+master
+```
+
+
+**Restart Components via OpsRequest API**
+
+List specific components to be restarted:
+
+```yaml
+apiVersion: operations.kubeblocks.io/v1alpha1
+kind: OpsRequest
+metadata:
+  name: es-multinode-restart-ops
+  namespace: demo
+spec:
+  clusterName: es-multinode
+  type: Restart
+  restart:
+  - componentName: dit
+```
+
+**Verifying Restart Completion**
+
+To verify a successful component restart:
+
+1. 
Track OpsRequest progress: + ```bash + kubectl get opsrequest es-multinode-restart-ops -n demo -w + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + es-multinode-restart-ops Restart es-multinode Running 0/3 8s + es-multinode-restart-ops Restart es-multinode Running 1/3 59s + es-multinode-restart-ops Restart es-multinode Running 2/3 117s + es-multinode-restart-ops Restart es-multinode Running 3/3 2m55s + es-multinode-restart-ops Restart es-multinode Running 3/3 2m55s + es-multinode-restart-ops Restart es-multinode Succeed 3/3 2m55s + ``` + +2. Check pod status: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=es-multinode + ``` + Note: Pods will show new creation timestamps after restart. Only pods belongs to component `dit` have been restarted. + +Once the operation is complete, the cluster will return to the Running state. + +## Summary +In this guide, you learned how to: +1. Stop a Elasticsearch Cluster to suspend operations while retaining persistent storage. +2. Start a stopped cluster to bring it back online. +3. Restart specific cluster components to recreate their Pods without stopping the entire cluster. + +By managing the lifecycle of your Elasticsearch Cluster, you can optimize resource utilization, reduce costs, and maintain flexibility in your Kubernetes environment. KubeBlocks provides a seamless way to perform these operations, ensuring high availability and minimal disruption. 
diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/02-vertical-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..aeb8a817 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,176 @@ +--- +title: Vertical Scaling in a Elasticsearch Cluster +description: Learn how to perform vertical scaling in a Elasticsearch Cluster managed by KubeBlocks to optimize resource utilization and improve performance. +keywords: [KubeBlocks, Elasticsearch, Vertical Scaling, Kubernetes, Resources] +sidebar_position: 2 +sidebar_label: Vertical Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Vertical Scaling for Elasticsearch Clusters with KubeBlocks + +This guide demonstrates how to vertically scale a Elasticsearch Cluster managed by KubeBlocks by adjusting compute resources (CPU and memory) while maintaining the same number of replicas. + +Vertical scaling modifies compute resources (CPU and memory) for Elasticsearch instances while maintaining replica count. Key characteristics: + +- **Non-disruptive**: When properly configured, maintains availability during scaling +- **Granular**: Adjust CPU, memory, or both independently +- **Reversible**: Scale up or down as needed + +KubeBlocks ensures minimal impact during scaling operations by following a controlled, role-aware update strategy: +**Role-Aware Replicas (Primary/Secondary Replicas)** +- Secondary replicas update first – Non-leader pods are upgraded to minimize disruption. +- Primary updates last – Only after all secondaries are healthy does the primary pod restart. +- Cluster state progresses from Updating → Running once all replicas are stable. 
+ +**Role-Unaware Replicas (Ordinal-Based Scaling)** +If replicas have no defined roles, updates follow Kubernetes pod ordinal order: +- Highest ordinal first (e.g., pod-2 → pod-1 → pod-0) to ensure deterministic rollouts. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Elasticsearch Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Vertical Scale + +**Expected Workflow**: + +1. Pods are updated in pod ordinal order, from highest to lowest, (e.g., pod-2 → pod-1 → pod-0) +1. Cluster status transitions from `Updating` to `Running` + + + + Option 1: Using VerticalScaling OpsRequest + + Apply the following YAML to scale up the resources for the elasticsearch-broker component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-vscale-ops + namespace: demo + spec: + clusterName: es-multinode + type: VerticalScaling + verticalScaling: + - componentName: dit + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + + You can check the progress of the scaling operation with the following command: + + ```bash + kubectl -n demo get ops es-multinode-vscale-ops -w + ``` + + Expected Result: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + es-multinode-vscale-ops VerticalScaling es-multinode Running 0/3 57s + es-multinode-vscale-ops VerticalScaling es-multinode Running 1/3 60s + es-multinode-vscale-ops VerticalScaling es-multinode Running 2/3 118s + es-multinode-vscale-ops VerticalScaling es-multinode Running 3/3 2m51s + es-multinode-vscale-ops VerticalScaling es-multinode Running 3/3 2m51s + es-multinode-vscale-ops VerticalScaling es-multinode Succeed 3/3 2m51s + ``` + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update `spec.componentSpecs.resources` field to the desired resources for vertical 
scale. + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: dit + replicas: 3 + resources: + requests: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + limits: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + ... + ``` + + + +## Best Practices & Considerations + +**Planning:** +- Scale during maintenance windows or low-traffic periods +- Verify Kubernetes cluster has sufficient resources +- Check for any ongoing operations before starting + +**Execution:** +- Maintain balanced CPU-to-Memory ratios +- Set identical requests/limits for guaranteed QoS + +**Post-Scaling:** +- Monitor resource utilization and application performance +- Consider adjusting Elasticsearch parameters if needed + +## Verification +Verify the updated resources by inspecting the cluster configuration or Pod details: +```bash +kbcli cluster describe es-multinode -n demo +``` + +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +dit 1 / 1 1Gi / 1Gi data:20Gi +``` + +## Key Benefits of Vertical Scaling with KubeBlocks +- Seamless Scaling: Pods are recreated in a specific order to ensure minimal disruption. +- Dynamic Resource Adjustments: Easily scale CPU and memory based on workload requirements. +- Flexibility: Choose between OpsRequest for dynamic scaling or direct API updates for precise control. +- Improved Availability: The cluster remains operational during the scaling process, maintaining high availability. + +## Cleanup +To remove all created resources, delete the Elasticsearch Cluster along with its namespace: +```bash +kubectl delete cluster es-multinode -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +1. Deploy a Elasticsearch Cluster managed by KubeBlocks. +2. 
Perform vertical scaling by increasing or decreasing resources for the elasticsearch component. +3. Use both OpsRequest and direct Cluster API updates to adjust resource allocations. + +Vertical scaling is a powerful tool for optimizing resource utilization and adapting to changing workload demands, ensuring your Elasticsearch Cluster remains performant and resilient. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/03-horizontal-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..6a89af60 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,234 @@ +--- +title: Horizontal Scaling of Elasticsearch Clusters with KubeBlocks +description: Learn how to perform horizontal scaling (scale-out and scale-in) on a Elasticsearch cluster managed by KubeBlocks using OpsRequest and direct Cluster API updates. +keywords: [KubeBlocks, Elasticsearch, Horizontal Scaling, Scale-Out, Scale-In, Kubernetes] +sidebar_position: 3 +sidebar_label: Horizontal Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Horizontal Scaling for Elasticsearch Clusters with KubeBlocks + +This guide explains how to perform horizontal scaling (scale-out and scale-in) on a Elasticsearch cluster managed by KubeBlocks. You'll learn how to use both **OpsRequest** and direct **Cluster API** updates to achieve this. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Elasticsearch Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + +## Scale-out (Add Replicas) + +**Expected Workflow**: + +1. New pod is provisioned, and transitions from `Pending` to `Running`. +2. 
Cluster status changes from `Updating` to `Running` + + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale out the Elasticsearch cluster by adding 1 replica to elasticsearch component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-scale-out-ops + namespace: demo + spec: + clusterName: es-multinode + type: HorizontalScaling + horizontalScaling: + - componentName: dit + # Specifies the replica changes for scaling in components + scaleOut: + # Specifies the replica changes for the component. + # add one more replica to current component + replicaChanges: 1 + ``` + + Monitor the progress of the scaling operation: + + ```bash + kubectl get ops es-multinode-scale-out-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + es-multinode-scale-out-ops HorizontalScaling es-multinode Running 0/1 9s + es-multinode-scale-out-ops HorizontalScaling es-multinode Running 1/1 16s + es-multinode-scale-out-ops HorizontalScaling es-multinode Succeed 1/1 16s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: dit + replicas: 4 # increase replicas to scale-out + ... + ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster es-multinode -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 4}]' + ``` + + + +### Verify Scale-Out + +After applying the operation, you will see a new pod created and the Elasticsearch cluster status goes from `Updating` to `Running`, and the newly created pod has a new role `secondary`. 
+ +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=es-multinode,apps.kubeblocks.io/component-name=dit +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +es-multinode-dit-0 3/3 Running 0 4m28s +es-multinode-dit-1 3/3 Running 0 5m27s +es-multinode-dit-2 3/3 Running 0 6m25s +es-multinode-dit-3 3/3 Running 0 1m25s +``` + +## Scale-in (Remove Replicas) + +**Expected Workflow**: + +1. Selected replica (the one with the largest ordinal) is removed +3. Pod is terminated gracefully +4. Cluster status changes from `Updating` to `Running` + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale in the Elasticsearch cluster by removing ONE replica: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-scale-in-ops + namespace: demo + spec: + clusterName: es-multinode + type: HorizontalScaling + horizontalScaling: + - componentName: dit + # Specifies the replica changes for scaling in components + scaleIn: + # Specifies the replica changes for the component. 
+ # remove one replica from current component + replicaChanges: 1 + ``` + + Monitor progress: + ```bash + kubectl get ops es-multinode-scale-in-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + es-multinode-scale-in-ops HorizontalScaling es-multinode Running 0/1 8s + es-multinode-scale-in-ops HorizontalScaling es-multinode Running 1/1 24s + es-multinode-scale-in-ops HorizontalScaling es-multinode Succeed 1/1 24s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: dit + replicas: 3 # decrease replicas to scale-in + ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster es-multinode -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 3}]' + ``` + + + + +### Verify Scale-In + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=es-multinode,apps.kubeblocks.io/component-name=dit +``` + +Example Output (three Pod): +```bash +NAME READY STATUS RESTARTS AGE +es-multinode-dit-0 3/3 Running 0 8m20s +es-multinode-dit-1 3/3 Running 0 9m19s +es-multinode-dit-2 3/3 Running 0 10m +``` + +## Best Practices + +When performing horizontal scaling: +- Scale during low-traffic periods when possible +- Monitor cluster health during scaling operations +- Verify sufficient resources exist before scaling out +- Consider storage requirements for new replicas + +## Cleanup +To remove all created resources, delete the Elasticsearch cluster along with its namespace: +```bash +kubectl delete cluster es-multinode -n demo +kubectl delete ns demo +``` + +## Summary +In this guide you learned how to: +- Perform scale-out operations to add replicas to a Elasticsearch cluster. +- Perform scale-in operations to remove replicas from a Elasticsearch cluster. 
+- Use both OpsRequest and direct Cluster API updates for horizontal scaling.
+
+KubeBlocks ensures seamless scaling with minimal disruption to your database operations.
diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/04-volume-expansion.mdx b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/04-volume-expansion.mdx
new file mode 100644
index 00000000..01e80bac
--- /dev/null
+++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/04-volume-expansion.mdx
@@ -0,0 +1,251 @@
+---
+title: Expanding Volume in an Elasticsearch Cluster
+description: Learn how to expand Persistent Volume Claims (PVCs) in an Elasticsearch cluster managed by KubeBlocks without downtime.
+keywords: [KubeBlocks, Elasticsearch, Volume Expansion, Kubernetes, PVC]
+sidebar_position: 4
+sidebar_label: Volume Expansion
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+# Expanding Volume in an Elasticsearch Cluster
+
+This guide explains how to expand Persistent Volume Claims (PVCs) in an Elasticsearch cluster managed by **KubeBlocks**. Volume expansion enables dynamic storage capacity increases, allowing your database to scale seamlessly as data grows. When supported by the underlying storage class, this operation can be performed without downtime.
+
+Volume expansion allows you to increase the size of a Persistent Volume Claim (PVC) after it has been created. This feature was introduced in Kubernetes v1.11 and became generally available (GA) in Kubernetes v1.24.
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +### Check the Storage Class for Volume Expansion Support + +List all available storage classes and verify if volume expansion is supported by checking the `ALLOWVOLUMEEXPANSION` field: +```bash +kubectl get storageclass +``` + +Example Output: +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +Ensure the storage class you are using has `ALLOWVOLUMEEXPANSION` set to true. If it is false, the storage class does not support volume expansion. + +## Deploy a Elasticsearch Cluster with StorageClass + +KubeBlocks uses a declarative approach to manage Elasticsearch clusters. Below is an example configuration for deploying a Elasticsearch cluster with 3 replicas. + +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: es-multinode + namespace: demo +spec: + terminationPolicy: Delete + componentSpecs: + - name: dit + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + configs: + - name: es-cm + variables: + # use key `roles` to specify roles this component assume + roles: data,ingest,transform + replicas: 3 + disableExporter: false + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "1" + memory: "2Gi" + volumeClaimTemplates: + - name: data + spec: + # specify storage class name supports Volume Expansion + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: master + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + configs: + - name: es-cm + variables: + # use key `roles` to specify roles this component assume + roles: master + replicas: 3 + disableExporter: false + resources: + limits: + cpu: 
"1" + memory: "2Gi" + requests: + cpu: "1" + memory: "2Gi" + volumeClaimTemplates: + - name: data + spec: + # specify storage class name supports Volume Expansion + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**Explanation of Key Fields** +- `storageClassName`: Specifies `StorageClass` name that supports volume expansion. If not set, the StorageClass annotated `default` will be used. + +:::note +**ALLOWVOLUMEEXPANSION** + +Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`) when creating cluster. + +::: + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Expand volume + +:::note +1. Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`). +2. The new size must be larger than the current size. +3. Volume expansion may require additional configurations depending on the storage provider. +::: + +You can expand the volume in one of two ways: + + + + + Option 1: Using VolumeExpansion OpsRequest + + Apply the following YAML to increase the volume size for the elasticsearch component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-expand-volume-ops + namespace: demo + spec: + clusterName: es-multinode + type: VolumeExpansion + volumeExpansion: + - componentName: dit + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + Monitor the expansion progress with: + + ```bash + kubectl describe ops es-multinode-expand-volume-ops -n demo + ``` + + Expected Result: + ```bash + Status: + Phase: Succeed + ``` + Once completed, the PVC size will be updated. 
+ + :::note + If the storage class you use does not support volume expansion, this OpsRequest fails fast with information like: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update the `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` field to the desired size. + + ```yaml + componentSpecs: + - name: dit + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # specify new size, and make sure it is larger than current size + storage: 30Gi + ``` + KubeBlocks will automatically update the PVC size based on the new specifications. + + + +## Verification + +Verify the updated cluster configuration: +```bash +kbcli cluster describe es-multinode -n demo +``` +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +dit 1 / 1 1Gi / 1Gi data:30Gi +``` +The volume size for the data PVC has been updated to the specified value (e.g., 30Gi in this case). + +Confirm PVC resizing completion: +```bash +kubectl get pvc -l app.kubernetes.io/instance=es-multinode,apps.kubeblocks.io/component-name=dit -n demo +``` +Expected Output: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS +data-es-multinode-dit-0 Bound pvc-uuid 30Gi RWO +data-es-multinode-dit-1 Bound pvc-uuid 30Gi RWO +data-es-multinode-dit-2 Bound pvc-uuid 30Gi RWO +``` + +## Cleanup +To remove all created resources, delete the Elasticsearch cluster along with its namespace: +```bash +kubectl delete cluster es-multinode -n demo +kubectl delete ns demo +``` + +## Summary + +In this guide you learned how to: +1. Verify storage class compatibility for volume expansion. +2. Perform volume expansion using either: + - OpsRequest for dynamic updates. 
+ - Cluster API for manual updates. +3. Verify the updated PVC size and ensure the resize operation is complete. + +With volume expansion, you can efficiently scale your Elasticsearch cluster's storage capacity without service interruptions, ensuring your database can grow alongside your application needs. + + diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/05-manage-loadbalancer.mdx b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..ffcfb6fe --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,306 @@ +--- +title: Create and Destroy Elasticsearch Service Using the Declarative Cluster API in KubeBlocks +description: Learn how to configure and manage Elasticsearch services in KubeBlocks for external and internal access using LoadBalancer and other service types. +keywords: [KubeBlocks, Elasticsearch, LoadBalancer, External Service, Expose, Kubernetes] +sidebar_position: 5 +sidebar_label: Manage Elasticsearch Services +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Manage Elasticsearch Services Using the Declarative Cluster API in KubeBlocks + +This guide provides step-by-step instructions for exposing Elasticsearch services managed by KubeBlocks, both externally and internally. You'll learn to configure external access using cloud provider LoadBalancer services, manage internal services, and properly disable external exposure when no longer needed. 
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Elasticsearch Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## View Network Services +List the Services created for the Elasticsearch cluster: +```bash +kubectl get service -l app.kubernetes.io/instance=es-multinode -n demo +``` + +Example Services: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +es-multinode-dit-http ClusterIP 10.96.224.72 9200/TCP 56m +es-multinode-master-http ClusterIP 10.96.153.35 9200/TCP 56m +``` + +## Expose Elasticsearch Service + +External service addresses enable public internet access to Elasticsearch, while internal service addresses restrict access to the user's VPC. + +### Service Types Comparison + +| Type | Use Case | Cloud Cost | Security | +|------|----------|------------|----------| +| ClusterIP | Internal service communication | Free | Highest | +| NodePort | Development/testing | Low | Moderate | +| LoadBalancer | Production external access | High | Managed via security groups | + + + + + + + Option 1: Using OpsRequest + + To expose the Elasticsearch service externally using a LoadBalancer, create an OpsRequest resource: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: es-multinode + expose: + - componentName: master + services: + - name: internet + # Determines how the Service is exposed. Defaults to 'ClusterIP'. + # Valid options are 'ClusterIP', 'NodePort', and 'LoadBalancer'. + serviceType: LoadBalancer + ports: + - name: es-http + port: 9200 + protocol: TCP + targetPort: es-http + # Contains cloud provider related parameters if ServiceType is LoadBalancer. 
+ # Following is an example for AWS EKS + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + switch: Enable + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops es-multinode-expose-enable-ops -n demo + ``` + + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + es-multinode-expose-enable-ops Expose es-multinode Succeed 1/1 31s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the `spec.services` section in the Cluster resource to include a LoadBalancer service: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: es-multinode + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: elasticsearch + # expose a external service + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + componentSelector: master + name: master-internet + serviceName: master-internet + spec: + ports: + - name: es-http + nodePort: 32751 + port: 9200 + protocol: TCP + targetPort: es-http + type: LoadBalancer + componentSpecs: + ... + ``` + The YAML configuration above adds a new external service under the services section. This LoadBalancer service includes annotations for AWS Network Load Balancer (NLB). + + :::note + Cloud Provider Annotations + + When using a LoadBalancer service, you must include the appropriate annotations specific to your cloud provider. 
Below is a list of commonly used annotations for different cloud providers: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # Restricts the LoadBalancer to internal VPC access only. Defaults to internet-facing if not specified. + cloud.google.com/l4-rbs: "enabled" # Optimization for internet-facing LoadBalancer + ``` + + - Alibaba Cloud + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # Use "intranet" for internal-facing LoadBalancer + ``` + ::: + + + :::note + The `service.beta.kubernetes.io/aws-load-balancer-internal` annotation controls whether the LoadBalancer is internal or internet-facing. Note that this annotation cannot be modified dynamically after service creation. + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # Use "true" for internal VPC IPs + ``` + If you change this annotation from "false" to "true" after the Service is created, the annotation may update in the Service object, but the LoadBalancer will still retain its public IP. + + To properly modify this behavior: + - First, delete the existing LoadBalancer service. + - Recreate the service with the updated annotation (`service.beta.kubernetes.io/aws-load-balancer-internal`: "true"). + - Wait for the new LoadBalancer to be provisioned with the correct internal or external IP. 
+ ::: + + + Wait for the Cluster status to transition to Running using the following command: + ```bash + kubectl get cluster es-multinode -n demo -w + ``` + + ```text + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + es-multinode Delete Running 18m + ``` + + + + +### Verify the Exposed Service +Check the service details to confirm the LoadBalancer service is created: + +```bash +kubectl get service -l app.kubernetes.io/instance=es-multinode -n demo +``` + +Example Output: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +es-multinode-dit-http ClusterIP 10.96.224.72 9200/TCP 59m +es-multinode-master-http ClusterIP 10.96.153.35 9200/TCP 59m +es-multinode-master-internet LoadBalancer 10.96.38.72 9200:30998/TCP 19s +``` + +## Disable External Exposure + + + + + + Option 1: Using OpsRequest + + To disable external access, create an OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-expose-disable-ops + namespace: demo + spec: + clusterName: es-multinode + expose: + - componentName: master + services: + - name: internet + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops es-multinode-expose-disable-ops -n demo + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + es-multinode-expose-disable-ops Expose es-multinode Succeed 1/1 16s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, remove the `spec.services` field from the Cluster resource: + ```bash + kubectl patch cluster es-multinode -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + Monitor the cluster status until it is Running: + ```bash + kubectl get cluster es-multinode -n demo -w + ``` + + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + es-multinode Delete Running 26m + ``` + + + +### Verify Service Removal + +Ensure 
that the 'es-multinode-elasticsearch-internet' Service is removed: + +```bash +kubectl get service -l app.kubernetes.io/instance=es-multinode -n demo +``` + +Expected Result: The 'es-multinode-elasticsearch-internet' Service should be removed. + +## Cleanup +To remove all created resources, delete the Elasticsearch cluster along with its namespace: +```bash +kubectl delete cluster es-multinode -n demo +kubectl delete ns demo +``` + +## Summary +This guide demonstrated how to: +- Expose a Elasticsearch service externally or internally using KubeBlocks. +- Configure LoadBalancer services with cloud provider-specific annotations. +- Manage external access by enabling or disabling services via OpsRequest or direct updates to the Cluster API. + +KubeBlocks provides flexibility and simplicity for managing MySQL services in Kubernetes environments. simplicity for managing Elasticsearch services in Kubernetes environments. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/09-decommission-a-specific-replica.mdx b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..03d1cf45 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,134 @@ +--- +title: Decommission a Specific Pod in KubeBlocks-Managed Elasticsearch Clusters +description: Learn how to decommission (take offline) a specific Pod in a Elasticsearch cluster managed by KubeBlocks. 
+keywords: [KubeBlocks, Elasticsearch, Decommission Pod, Horizontal Scaling, Kubernetes] +sidebar_position: 9 +sidebar_label: Decommission Elasticsearch Replica +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + +# Decommission a Specific Pod in KubeBlocks-Managed Elasticsearch Clusters + +This guide explains how to decommission (take offline) specific Pods in Elasticsearch clusters managed by KubeBlocks. Decommissioning provides precise control over cluster resources while maintaining availability. Use this for workload rebalancing, node maintenance, or addressing failures. + +## Why Decommission Pods with KubeBlocks? + +In traditional StatefulSet-based deployments, Kubernetes lacks the ability to decommission specific Pods. StatefulSets ensure the order and identity of Pods, and scaling down always removes the Pod with the highest ordinal number (e.g., scaling down from 3 replicas removes `Pod-2` first). This limitation prevents precise control over which Pod to take offline, which can complicate maintenance, workload distribution, or failure handling. + +KubeBlocks overcomes this limitation by enabling administrators to decommission specific Pods directly. This fine-grained control ensures high availability and allows better resource management without disrupting the entire cluster. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Elasticsearch Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Decommission a Pod + +**Expected Workflow**: + +1. Replica specified in `onlineInstancesToOffline` is removed +2. Pod terminates gracefully +3. 
Cluster transitions from `Updating` to `Running` + +To decommission a specific Pod (e.g., 'es-multinode-dit-1'), you can use one of the following methods: + + + + + + Option 1: Using OpsRequest + + Create an OpsRequest to mark the Pod as offline: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-decommission-ops + namespace: demo + spec: + clusterName: es-multinode + type: HorizontalScaling + horizontalScaling: + - componentName: dit + scaleIn: + onlineInstancesToOffline: + - 'es-multinode-dit-1' # Specifies the instance names that need to be taken offline + ``` + + #### Monitor the Decommissioning Process + Check the progress of the decommissioning operation: + + ```bash + kubectl get ops es-multinode-decommission-ops -n demo -w + ``` + Example Output: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + es-multinode-decommission-ops HorizontalScaling es-multinode Running 0/1 8s + es-multinode-decommission-ops HorizontalScaling es-multinode Running 1/1 31s + es-multinode-decommission-ops HorizontalScaling es-multinode Succeed 1/1 31s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the Cluster resource directly to decommission the Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: dit + replicas: 2 # explected replicas after decommission + offlineInstances: + - es-multinode-dit-1 # <----- Specify Pod to be decommissioned + ... 
+ ``` + + + + +### Verify the Decommissioning + +After applying the updated configuration, verify the remaining Pods in the cluster: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=es-multinode,apps.kubeblocks.io/component-name=dit +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +es-multinode-dit-0 2/2 Running 0 24m +es-multinode-dit-2 2/2 Running 0 2m1s +``` + +## Summary +Key takeaways: +- Traditional StatefulSets lack precise Pod removal control +- KubeBlocks enables targeted Pod decommissioning +- Two implementation methods: OpsRequest or Cluster API + +This provides granular cluster management while maintaining availability. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/_category_.yml new file mode 100644 index 00000000..5c4d52e0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/04-operations/_category_.yml @@ -0,0 +1,4 @@ +position: 4 +label: Operations +collapsible: true +collapsed: false \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..16f23015 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,197 @@ +--- +title: Observability for Elasticsearch Clusters with the Prometheus Operator +description: Learn how to set up observability for Elasticsearch Clusters in KubeBlocks using the Prometheus Operator. Configure monitoring and visualize metrics with Grafana. 
+keywords: [KubeBlocks, Elasticsearch, Prometheus, Grafana, Observability, Metrics] +sidebar_position: 2 +sidebar_label: Observability for Elasticsearch Clusters +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Elasticsearch Monitoring with Prometheus Operator + +This guide demonstrates how to configure comprehensive monitoring for Elasticsearch clusters in KubeBlocks using: + +1. Prometheus Operator for metrics collection +2. Built-in Elasticsearch exporter for metrics exposure +3. Grafana for visualization + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Install Monitoring Stack + +### 1. Install Prometheus Operator +Deploy the kube-prometheus-stack using Helm: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. Verify Installation +Check all components are running: +```bash +kubectl get pods -n monitoring +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + + +## Deploy a Elasticsearch Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Configure Metrics Collection + +### 1. 
Verify Exporter Endpoint + +```bash +kubectl -n demo exec -it pods/es-multinode-dit-0 -- \ + curl -s http://127.0.0.1:9114/metrics | head -n 50 + +kubectl -n demo exec -it pods/es-multinode-master-0 -- \ + curl -s http://127.0.0.1:9114/metrics | head -n 50 +``` + +### 2. Create PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: elasticsearch-jmx-pod-monitor + namespace: demo + labels: # match labels in `prometheus.spec.podMonitorSelector` + release: prometheus +spec: + jobLabel: app.kubernetes.io/managed-by + podMetricsEndpoints: + - path: /metrics + port: metrics + scheme: http + namespaceSelector: + matchNames: + - demo + selector: + matchLabels: + app.kubernetes.io/instance: es-multinode +``` +**PodMonitor Configuration Guide** + +| Parameter | Required | Description | +|-----------|----------|-------------| +| `port` | Yes | Must match exporter port name ('http-metrics') | +| `namespaceSelector` | Yes | Targets namespace where Elasticsearch runs | +| `labels` | Yes | Must match Prometheus's podMonitorSelector | +| `path` | No | Metrics endpoint path (default: /metrics) | +| `interval` | No | Scraping interval (default: 30s) | + + +## Verify Monitoring Setup + +### 1. Check Prometheus Targets +Forward and access Prometheus UI: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +Open your browser and navigate to: +http://localhost:9090/targets + +Check if there is a scrape job corresponding to the PodMonitor (the job name is 'demo/es-multinode-pod-monitor'). + +Expected State: +- The State of the target should be UP. +- The target's labels should include the ones defined in podTargetLabels (e.g., 'app_kubernetes_io_instance'). + +### 2. 
Test Metrics Collection +Verify metrics are being scraped: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=elasticsearch_clusterinfo_up{job="kubeblocks"}' | jq +``` + +Example Output: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "elasticsearch_clusterinfo_up", + "container": "exporter", + "endpoint": "metrics", + "instance": "10.244.0.49:9114", + "job": "kubeblocks", + "namespace": "demo", + "pod": "es-multinode-master-2", + "url": "http://localhost:9200" + }, + "value": [ + 1747666760.443, + "1" + ] + }, +... // more lines ommited +``` +## Visualize in Grafana + +### 1. Access Grafana +Port-forward and login: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +Open your browser and navigate to http://localhost:3000. Use the default credentials to log in: +- Username: 'admin' +- Password: 'prom-operator' (default) + +### 2. Import Dashboard +Import the KubeBlocks Elasticsearch dashboard: + +1. In Grafana, navigate to "+" → "Import" +2. Import dashboard from [Elasticsearch Dashboard](https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/addons/elasticsearch/dashboards/elasticsearch.json) + +![elasticsearch-monitoring-grafana-dashboard.png](/img/docs/en/elasticsearch-monitoring-grafana-dashboard.png) +Figure 1. Elasticsearch dashboard + + +## Delete +To delete all the created resources, run the following commands: +```bash +kubectl delete cluster es-multinode -n demo +kubectl delete ns demo +kubectl delete podmonitor es-multinode-pod-monitor -n demo +``` + +## Summary +In this tutorial, we set up observability for a Elasticsearch cluster in KubeBlocks using the Prometheus Operator. +By configuring a `PodMonitor`, we enabled Prometheus to scrape metrics from the Elasticsearch exporter. +Finally, we visualized these metrics in Grafana. 
This setup provides valuable insights for monitoring the health and performance of your Elasticsearch databases. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/08-monitoring/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/08-monitoring/_category_.yml new file mode 100644 index 00000000..6953a6d9 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +position: 8 +label: Monitoring +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_category_.yml new file mode 100644 index 00000000..b94a2569 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_category_.yml @@ -0,0 +1,4 @@ +position: 12 +label: KubeBlocks for Elasticsearch Community Edition +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_category_.yml new file mode 100644 index 00000000..753acb79 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_category_.yml @@ -0,0 +1,5 @@ +position: 100 +label: tpl +collapsible: true +collapsed: false +hidden: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_create-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_create-cluster.mdx new file mode 100644 index 00000000..1b1edb8c --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_create-cluster.mdx @@ -0,0 +1,66 @@ +KubeBlocks uses a declarative approach for managing Elasticsearch Clusters. +Below is an example configuration for deploying a Elasticsearch Cluster with +create a cluster with replicas for different roles. 
+ +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: es-multinode + namespace: demo +spec: + terminationPolicy: Delete + componentSpecs: + - name: dit + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + configs: + - name: es-cm + variables: + # use key `roles` to specify roles this component assume + roles: data,ingest,transform + replicas: 3 + disableExporter: false + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "1" + memory: "2Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: master + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + configs: + - name: es-cm + variables: + # use key `roles` to specify roles this component assume + roles: master + replicas: 3 + disableExporter: false + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "1" + memory: "2Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_prerequisites.mdx b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..e632dc41 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. 
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_verify-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_verify-cluster.mdx new file mode 100644 index 00000000..8360fb0f --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-elasticsearch/_tpl/_verify-cluster.mdx @@ -0,0 +1,36 @@ +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster es-multinode -n demo -w +``` + +Expected Output: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +es-multinode Delete Creating 10s +es-multinode Delete Updating 41s +es-multinode Delete Running 42s +``` + +Check the pod status and roles: +```bash +kubectl get pods -l app.kubernetes.io/instance=es-multinode -n demo +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +es-multinode-dit-0 3/3 Running 0 6m21s +es-multinode-dit-1 3/3 Running 0 6m21s +es-multinode-dit-2 3/3 Running 0 6m21s +es-multinode-master-0 3/3 Running 0 6m21s +es-multinode-master-1 3/3 Running 0 6m21s +es-multinode-master-2 3/3 Running 0 6m21s +``` + +Once the cluster status becomes Running, your Elasticsearch cluster is ready for use. + +:::tip +If you are creating the cluster for the very first time, it may take some time to pull images before running. 
+ +::: diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/01-overview.mdx b/docs/en/release-1_0_1/kubeblocks-for-kafka/01-overview.mdx new file mode 100644 index 00000000..d3fa3c14 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/01-overview.mdx @@ -0,0 +1,43 @@ +--- +title: Overview of KubeBlocks Kafka Addon +description: Learn about the features and capabilities of the KubeBlocks Kafka addon, including deployment topologies, lifecycle management, backup and restore, and supported versions. +keywords: [Kafka, KubeBlocks, operator, database, features, lifecycle management, backup, restore] +sidebar_position: 1 +sidebar_label: Overview +--- + +# Overview of KubeBlocks Kafka Addon + +Apache Kafka is a distributed streaming platform designed to build real-time pipelines and can be used as a message broker or as a replacement for a log aggregation solution for big data applications. + +- A broker is a Kafka server that stores data and handles requests from producers and consumers. Kafka clusters consist of multiple brokers, each identified by a unique ID. Brokers work together to distribute and replicate data across the cluster. +- KRaft was introduced in Kafka 3.3.1 in October 2022 as an alternative to Zookeeper. A subset of brokers are designated as controllers, and these controllers provide the consensus services that used to be provided by Zookeeper. 
+ +## Key features + +### Lifecycle Management + +KubeBlocks simplifies Kafka operations with comprehensive lifecycle management: + +| Feature | Description | +|------------------------------|-----------------------------------------------------------------------------| +| **Horizontal Scaling** | Scale replicas in/out to adjust capacity | +| **Vertical Scaling** | Adjust CPU/memory resources for Kafka instances | +| **Volume Expansion** | Dynamically increase storage capacity without downtime | +| **Restart Operations** | Controlled cluster restarts with minimal disruption | +| **Start/Stop** | Temporarily suspend/resume cluster operations | +| **Custom Services** | Expose specialized database endpoints | +| **Replica Management** | Safely decommission or rebuild specific replicas | +| **Version Upgrades** | Perform minor version upgrades seamlessly | +| **Advanced Scheduling** | Customize pod placement and resource allocation | +| **Monitoring** | Integrated Prometheus metrics collection | +| **Logging** | Centralized logs via Loki Stack | + +### Supported Versions + +KubeBlocks Kafka Addon supports these Kafka versions: + +| Major Version | Supported Minor Versions | +|---------------|--------------------------------| +| 3.x | 3.3.2 | +| 2.x | 2.7.0 | diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/02-quickstart.mdx b/docs/en/release-1_0_1/kubeblocks-for-kafka/02-quickstart.mdx new file mode 100644 index 00000000..a930ac2a --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/02-quickstart.mdx @@ -0,0 +1,472 @@ +--- +title: Kafka Quickstart +description: Comprehensive guide to deploying and managing Kafka ReplicaSet Clusters with KubeBlocks, including installation, configuration, and operational best practices, an alternative to dedicated operator. 
+keywords: [Kubernetes Operator, Kafka, KubeBlocks, Helm, Cluster Management, QuickStart] +sidebar_position: 2 +sidebar_label: Quickstart +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Kafka Quickstart + +This guide provides a comprehensive walkabout for deploying and managing Kafka ReplicaSet Clusters using the **KubeBlocks Kafka Add-on**, covering: +- System prerequisites and add-on installation +- Cluster creation and configuration +- Operational management including start/stop procedures +- Connection methods and cluster monitoring + +## Prerequisites + +### System Requirements + +Before proceeding, verify your environment meets these requirements: + +- A functional Kubernetes cluster (v1.21+ recommended) +- `kubectl` v1.21+ installed and configured with cluster access +- Helm installed ([installation guide](https://helm.sh/docs/intro/install/)) +- KubeBlocks installed ([installation guide](../user_docs/overview/install-kubeblocks)) + +### Verify Kafka Add-on + +The Kafka Add-on is included with KubeBlocks by default. Check its status: + +```bash +helm list -n kb-system | grep kafka +``` + +
+Example Output: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-kafka kb-system 1 2025-05-21 deployed kafka-1.0.0 +``` +
+ +If the add-on isn't enabled, choose an installation method: + + + + + ```bash + # Add Helm repo + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # For users in Mainland China, if GitHub is inaccessible or slow, use this alternative repo: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # Update helm repo + helm repo update + # Search available Add-on versions + helm search repo kubeblocks/kafka --versions + # Install your desired version (replace with your chosen version) + helm upgrade -i kb-addon-kafka kubeblocks-addons/kafka --version -n kb-system + ``` + + + + + ```bash + # Add an index (kubeblocks is added by default) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # Update the index + kbcli addon index update kubeblocks + # Update all indexes + kbcli addon index update --all + ``` + + To search and install an addon: + + ```bash + # Search Add-on + kbcli addon search kafka + # Install Add-on with your desired version (replace with your chosen version) + kbcli addon install kafka --version + ``` + **Example Output:** + ```bash + ADDON VERSION INDEX + kafka 0.9.0 kubeblocks + kafka 0.9.1 kubeblocks + kafka 1.0.0 kubeblocks + ``` + To enable or disable an addon: + + ```bash + # Enable Add-on + kbcli addon enable kafka + # Disable Add-on + kbcli addon disable kafka + ``` + + + + +:::note +**Version Compatibility** + +Always verify that the Kafka Add-on version matches your KubeBlocks major version to avoid compatibility issues. + +::: + +## Deploy a Kafka Cluster + +Deploy a basic Kafka Cluster with default settings: + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/kafka/cluster-separated.yaml +``` + +This creates: +- A Kafka Cluster with 3 components, kafka controller with 1 replica, kafka broker with 1 replicas and kafka exporter with 1 replica. 
+- Default resource allocations (0.5 CPU, 0.5Gi memory) +- 20Gi persistent storage + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: kafka-separated-cluster + namespace: demo +spec: + # Specifies the behavior when a Cluster is deleted. + # Valid options are: [DoNotTerminate, Delete, WipeOut] (`Halt` is deprecated since KB 0.9) + # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. + # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. + # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. + terminationPolicy: Delete + # Specifies the name of the ClusterDefinition to use when creating a Cluster. + # Note: DO NOT UPDATE THIS FIELD + # The value must be `kafaka` to create a Kafka Cluster + clusterDef: kafka + # Specifies the name of the ClusterTopology to be used when creating the + # Cluster. + # - combined: combined Kafka controller (KRaft) and broker in one Component + # - combined_monitor: combined mode with monitor component + # - separated: separated KRaft and Broker Components. + # - separated_monitor: separated mode with monitor component + # Valid options are: [combined,combined_monitor,separated,separated_monitor] + topology: separated_monitor + # Specifies a list of ClusterComponentSpec objects used to define the + # individual Components that make up a Cluster. 
+ # This field allows for detailed configuration of each Component within the Cluster + componentSpecs: + - name: kafka-broker + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + env: + - name: KB_KAFKA_BROKER_HEAP # use this ENV to set BROKER HEAP + value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64" + - name: KB_KAFKA_CONTROLLER_HEAP # use this ENV to set CONTOLLER_HEAP + value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64" + # Whether to enable direct Pod IP address access mode. + # - If set to 'true', Kafka clients will connect to Brokers using the Pod IP address directly. + # - If set to 'false', Kafka clients will connect to Brokers using the Headless Service's FQDN. + - name: KB_BROKER_DIRECT_POD_ACCESS + value: "true" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: metadata + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + - name: kafka-controller + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: metadata + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + - name: kafka-exporter + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "1Gi" + requests: + cpu: "0.1" + memory: "0.2Gi" +``` + +For more API fields and descriptions, refer to the [API Reference](../user_docs/references/api-reference/cluster). + +## Verify Cluster Status + +When deploying a Kafka Cluster with 3 replicas: + +Confirm successful deployment by checking: + +1. Cluster phase is `Running` +2. 
All pods are operational + +Check status using either method: + + + +```bash +kubectl get cluster kafka-separated-cluster -n demo -w +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +kafka-separated-cluster kafka Delete Running 2m48s + +kubectl get pods -l app.kubernetes.io/instance=kafka-separated-cluster -n demo +NAME READY STATUS RESTARTS AGE +kafka-separated-cluster-kafka-broker-0 2/2 Running 0 2m33s +kafka-separated-cluster-kafka-controller-0 2/2 Running 0 2m58s +kafka-separated-cluster-kafka-exporter-0 1/1 Running 0 2m9s +``` + + + + + With `kbcli` installed, you can view comprehensive cluster information: + +```bash +kbcli cluster describe kafka-separated-cluster -n demo + +Name: kafka-separated-cluster Created Time: May 19,2025 16:56 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo kafka separated_monitor Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL +kafka-broker kafka-separated-cluster-kafka-broker-advertised-listener-0.demo.svc.cluster.local:9092 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +kafka-broker 3.3.2 kafka-separated-cluster-kafka-broker-0 Running zone-x x.y.z May 19,2025 16:57 UTC+0800 +kafka-controller 3.3.2 kafka-separated-cluster-kafka-controller-0 Running zone-x x.y.z May 19,2025 16:56 UTC+0800 +kafka-exporter 1.6.0 kafka-separated-cluster-kafka-exporter-0 Running zone-x x.y.z May 19,2025 16:57 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +kafka-controller 500m / 500m 512Mi / 512Mi metadata:1Gi +kafka-broker 500m / 500m 512Mi / 512Mi data:20Gi + metadata:1Gi +kafka-exporter 100m / 500m 214748364800m / 1Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +kafka-controller kafka-controller-1.0.0 docker.io/bitnami/kafka:3.3.2-debian-11-r54 + docker.io/bitnami/jmx-exporter:0.18.0-debian-11-r20 +kafka-broker kafka-broker-1.0.0 docker.io/bitnami/kafka:3.3.2-debian-11-r54 + 
docker.io/bitnami/jmx-exporter:0.18.0-debian-11-r20 +kafka-exporter kafka-exporter-1.0.0 docker.io/bitnami/kafka-exporter:1.6.0-debian-11-r67 + +Show cluster events: kbcli cluster list-events -n demo kafka-separated-cluster +``` + + + + +## Access Kafka Cluster + +**Step 1. Get the address of the Kafka Services** +```bash +kubectl get svc -l app.kubernetes.io/instance=kafka-separated-cluster -n demo +``` + +Expected Output: +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kafka-separated-cluster-kafka-broker-advertised-listener-0 ClusterIP 10.96.131.175 9092/TCP 5m8s +``` +The service name is `kafka-separated-cluster-kafka-broker-advertised-listener-0` in namespace `demo`. + +**Step 2. Connect to the Kafka cluster with the port No.** + +1. Start client pod. + + ```bash + kubectl run kafka-producer --restart='Never' --image docker.io/bitnami/kafka:3.3.2-debian-11-r54 --command -- sleep infinity + kubectl run kafka-consumer --restart='Never' --image docker.io/bitnami/kafka:3.3.2-debian-11-r54 --command -- sleep infinity + ``` + +2. Log in to kafka-producer. + + ```bash + kubectl exec -ti kafka-producer -- bash + ``` + +3. Create topic. + + ```bash + kafka-topics.sh --create --topic quickstart-events --bootstrap-server kafka-separated-cluster-kafka-broker-advertised-listener-0.demo:9092 + ``` + +4. Create producer. + + ```bash + kafka-console-producer.sh --topic quickstart-events --bootstrap-server kafka-separated-cluster-kafka-broker-advertised-listener-0.demo:9092 + ``` + +5. Enter:"Hello, KubeBlocks" and press Enter. + +6. Start a new terminal session and login to kafka-consumer. + + ```bash + kubectl exec -ti kafka-consumer -- bash + ``` + +7. Create consumer and specify consuming topic, and consuming message from the beginning. + + ```bash + kafka-console-consumer.sh --topic quickstart-events --from-beginning --bootstrap-server kafka-separated-cluster-kafka-broker-advertised-listener-0.demo:9092 + ``` + + And you get the output 'Hello, KubeBlocks'. 
+ + +## Stop the Kafka Cluster + +Stopping a cluster temporarily suspends operations while preserving all data and configuration: + +**Key Effects:** +- Compute resources (Pods) are released +- Persistent storage (PVCs) remains intact +- Service definitions are maintained +- Cluster configuration is preserved +- Operational costs are reduced + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/kafka/stop.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-stop + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: Stop + ``` + + + + Alternatively, stop by setting `spec.componentSpecs.stop` to true: + + ```bash + kubectl patch cluster kafka-separated-cluster -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/1/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/2/stop", + "value": true + } + ]' + ``` + + + +## Start the Kafka Cluster + +Restarting a stopped cluster resumes operations with all data and configuration intact. 
+ +**Key Effects:** +- Compute resources (Pods) are recreated +- Services become available again +- Cluster returns to previous state + + + + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-start + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: Start + ``` + + + + Restart by setting `spec.componentSpecs.stop` to false: + + ```bash + kubectl patch cluster kafka-separated-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/1/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/2/stop" + } + ]' + ``` + + + + +## Delete Kafka Cluster + +Choose carefully based on your data retention needs: + +| Policy | Resources Removed | Data Removed | Recommended For | +|-----------------|-------------------|--------------|-----------------| +| DoNotTerminate | None | None | Critical production clusters | +| Delete | All resources | PVCs deleted | Non-critical environments | +| WipeOut | All resources | Everything* | Test environments only | + +*Includes snapshots and backups in external storage + +**Pre-Deletion Checklist:** +1. Verify no applications are using the cluster +2. Ensure required backups exist +3. Confirm proper terminationPolicy is set +4. 
Check for dependent resources + +For test environments, use this complete cleanup: + +```bash +kubectl patch cluster kafka-separated-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster kafka-separated-cluster -n demo +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/01-stop-start-restart.mdx b/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..8b84b64f --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,310 @@ +--- +title: Kafka Cluster Lifecycle Management (Stop, Start, Restart) +description: Learn how to manage Kafka Cluster states in KubeBlocks including stopping, starting, and restarting operations to optimize resources. +keywords: [KubeBlocks, Kafka, Cluster Management, Stop, Start, Restart] +sidebar_position: 1 +sidebar_label: Lifecycle Management +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Kafka Cluster Lifecycle Management + +This guide demonstrates how to manage a Kafka Cluster's operational state in **KubeBlocks**, including: + +- Stopping the cluster to conserve resources +- Starting a stopped cluster +- Restarting cluster components + +These operations help optimize resource usage and reduce operational costs in Kubernetes environments. 
+ +Lifecycle management operations in KubeBlocks: + +| Operation | Effect | Use Case | +|-----------|--------|----------| +| Stop | Suspends cluster, retains storage | Cost savings, maintenance | +| Start | Resumes cluster operation | Restore service after pause | +| Restart | Recreates pods for component | Configuration changes, troubleshooting | + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Kafka Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Cluster Lifecycle Operations + +### Stopping the Cluster + +Stopping a Kafka Cluster in KubeBlocks will: + +1. Terminates all running pods +2. Retains persistent storage (PVCs) +3. Maintains cluster configuration + +This operation is ideal for: +- Temporary cost savings +- Maintenance windows +- Development environment pauses + + + + + + Option 1: OpsRequest API + + Create a Stop operation request: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-stop-ops + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: Stop + ``` + + + + + Option 2: Cluster API Patch + + Modify the cluster spec directly by patching the stop field: + + ```bash + kubectl patch cluster kafka-separated-cluster -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/1/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/2/stop", + "value": true + } + ]' + ``` + + + + + +### Verifying Cluster Stop + +To confirm a successful stop operation: + +1. 
Check cluster status transition: + ```bash + kubectl get cluster kafka-separated-cluster -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + kafka-separated-cluster kafka Delete Stopping 16m3s + kafka-separated-cluster kafka Delete Stopped 16m55s + ``` + +2. Verify no running pods: + ```bash + kubectl get pods -l app.kubernetes.io/instance=kafka-separated-cluster -n demo + ``` + Example Output: + ```bash + No resources found in demo namespace. + ``` + +3. Confirm persistent volumes remain: + ```bash + kubectl get pvc -l app.kubernetes.io/instance=kafka-separated-cluster -n demo + ``` + Example Output: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE + data-kafka-separated-cluster-kafka-broker-0 Bound pvc-ddd54e0f-414a-49ed-8e17-41e9f5082af1 20Gi RWO standard 14m + metadata-kafka-separated-cluster-kafka-broker-0 Bound pvc-d63b7d80-cac5-41b9-b694-6a298921003b 1Gi RWO standard 14m + metadata-kafka-separated-cluster-kafka-controller-0 Bound pvc-e6263eb1-405a-4090-b2bb-f92cca0ba36d 1Gi RWO standard 14m + ``` + +### Starting the Cluster + +Starting a stopped Kafka Cluster: +1. Recreates all pods +2. Reattaches persistent storage +3. Restores service endpoints + +Expected behavior: +- Cluster returns to previous state +- No data loss occurs +- Services resume automatically + + + + + Initiate a Start operation request: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-start-ops + namespace: demo + spec: + # Specifies the name of the Cluster resource that this operation is targeting. + clusterName: kafka-separated-cluster + type: Start + ``` + + + + + + Modify the cluster spec to resume operation: + 1. Set stop: false, or + 2. 
Remove the stop field entirely + + ```bash + kubectl patch cluster kafka-separated-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/1/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/2/stop" + } + ]' + ``` + + + + + +### Verifying Cluster Start + +To confirm a successful start operation: + +1. Check cluster status transition: + ```bash + kubectl get cluster kafka-separated-cluster -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + kafka-separated-cluster kafka Delete Updating 24m + kafka-separated-cluster kafka Delete Running 24m + kafka-separated-cluster kafka Delete Running 24m + ``` + +2. Verify pod recreation: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-separated-cluster + ``` + Example Output: + ```bash + NAME READY STATUS RESTARTS AGE + kafka-separated-cluster-kafka-broker-0 2/2 Running 0 2m4s + kafka-separated-cluster-kafka-controller-0 2/2 Running 0 104s + kafka-separated-cluster-kafka-exporter-0 1/1 Running 0 84s + ``` + +### Restarting Cluster + +Restart operations provide: +- Pod recreation without full cluster stop +- Component-level granularity +- Minimal service disruption + +Use cases: +- Configuration changes requiring restart +- Resource refresh +- Troubleshooting + +**Check Components** + +There are five components in Milvus Cluster. 
To get the list of components, +```bash +kubectl get cluster -n demo kafka-separated-cluster -oyaml | yq '.spec.componentSpecs[].name' +``` + +Expected Output: +```text +kafka-controller +kafka-broker +kafka-exporter +``` + + +**Restart Proxy via OpsRequest API** + +List specific components to be restarted: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: kafka-separated-cluster-restart-ops + namespace: demo +spec: + clusterName: kafka-separated-cluster + type: Restart + restart: + - componentName: kafka-broker +``` + +**Verifying Restart Completion** + +To verify a successful component restart: + +1. Track OpsRequest progress: + ```bash + kubectl get opsrequest kafka-separated-cluster-restart-ops -n demo -w + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + kafka-separated-cluster-restart-ops Restart kafka-separated-cluster Running 0/1 8s + kafka-separated-cluster-restart-ops Restart kafka-separated-cluster Running 1/1 22s + kafka-separated-cluster-restart-ops Restart kafka-separated-cluster Running 1/1 23s + kafka-separated-cluster-restart-ops Restart kafka-separated-cluster Succeed 1/1 23s + ``` + +2. Check pod status: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-separated-cluster + ``` + Note: Pods will show new creation timestamps after restart. Only pods belongs to component `kafka-broker` have been restarted. + +Once the operation is complete, the cluster will return to the Running state. + +## Summary +In this guide, you learned how to: +1. Stop a Kafka Cluster to suspend operations while retaining persistent storage. +2. Start a stopped cluster to bring it back online. +3. Restart specific cluster components to recreate their Pods without stopping the entire cluster. + +By managing the lifecycle of your Kafka Cluster, you can optimize resource utilization, reduce costs, and maintain flexibility in your Kubernetes environment. 
KubeBlocks provides a seamless way to perform these operations, ensuring high availability and minimal disruption. diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/02-vertical-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..d0a621f0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,174 @@ +--- +title: Vertical Scaling in a Kafka Cluster +description: Learn how to perform vertical scaling in a Kafka Cluster managed by KubeBlocks to optimize resource utilization and improve performance. +keywords: [KubeBlocks, Kafka, Vertical Scaling, Kubernetes, Resources] +sidebar_position: 2 +sidebar_label: Vertical Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Vertical Scaling for Kafka Clusters with KubeBlocks + +This guide demonstrates how to vertically scale a Kafka Cluster managed by KubeBlocks by adjusting compute resources (CPU and memory) while maintaining the same number of replicas. + +Vertical scaling modifies compute resources (CPU and memory) for Kafka instances while maintaining replica count. Key characteristics: + +- **Non-disruptive**: When properly configured, maintains availability during scaling +- **Granular**: Adjust CPU, memory, or both independently +- **Reversible**: Scale up or down as needed + +KubeBlocks ensures minimal impact during scaling operations by following a controlled, role-aware update strategy: +**Role-Aware Replicas (Primary/Secondary Replicas)** +- Secondary replicas update first – Non-leader pods are upgraded to minimize disruption. +- Primary updates last – Only after all secondaries are healthy does the primary pod restart. +- Cluster state progresses from Updating → Running once all replicas are stable. 
+ +**Role-Unaware Replicas (Ordinal-Based Scaling)** +If replicas have no defined roles, updates follow Kubernetes pod ordinal order: +- Highest ordinal first (e.g., pod-2 → pod-1 → pod-0) to ensure deterministic rollouts. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Kafka Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Vertical Scale + +**Expected Workflow**: + +1. Pods are updated in pod ordinal order, from highest to lowest, (e.g., pod-2 → pod-1 → pod-0) +1. Cluster status transitions from `Updating` to `Running` + + + + Option 1: Using VerticalScaling OpsRequest + + Apply the following YAML to scale up the resources for the kafka-broker component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-vscale-ops + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: VerticalScaling + verticalScaling: + - componentName: kafka-broker + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + + You can check the progress of the scaling operation with the following command: + + ```bash + kubectl -n demo get ops kafka-separated-cluster-vscale-ops -w + ``` + + Expected Result: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + kafka-separated-cluster-vscale-ops VerticalScaling kafka-separated-cluster Running 0/1 12s + kafka-separated-cluster-vscale-ops VerticalScaling kafka-separated-cluster Running 1/1 13s + kafka-separated-cluster-vscale-ops VerticalScaling kafka-separated-cluster Running 1/1 13s + kafka-separated-cluster-vscale-ops VerticalScaling kafka-separated-cluster Succeed 1/1 13s + ``` + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update `spec.componentSpecs.resources` field to the desired resources for vertical scale. 
+ + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: kafka-broker + replicas: 1 + resources: + requests: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + limits: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + ... + ``` + + + +## Best Practices & Considerations + +**Planning:** +- Scale during maintenance windows or low-traffic periods +- Verify Kubernetes cluster has sufficient resources +- Check for any ongoing operations before starting + +**Execution:** +- Maintain balanced CPU-to-Memory ratios +- Set identical requests/limits for guaranteed QoS + +**Post-Scaling:** +- Monitor resource utilization and application performance +- Consider adjusting Kafka parameters if needed + +## Verification +Verify the updated resources by inspecting the cluster configuration or Pod details: +```bash +kbcli cluster describe kafka-separated-cluster -n demo +``` + +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +kafka-broker 1 / 1 1Gi / 1Gi data:20Gi +``` + +## Key Benefits of Vertical Scaling with KubeBlocks +- Seamless Scaling: Pods are recreated in a specific order to ensure minimal disruption. +- Dynamic Resource Adjustments: Easily scale CPU and memory based on workload requirements. +- Flexibility: Choose between OpsRequest for dynamic scaling or direct API updates for precise control. +- Improved Availability: The cluster remains operational during the scaling process, maintaining high availability. + +## Cleanup +To remove all created resources, delete the Kafka Cluster along with its namespace: +```bash +kubectl delete cluster kafka-separated-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +1. Deploy a Kafka Cluster managed by KubeBlocks. +2. 
Perform vertical scaling by increasing or decreasing resources for the kafka-broker component.
Cluster status changes from `Updating` to `Running` + + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale out the Kafka cluster by adding 1 replica to kafka component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-scale-out-ops + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: kafka-broker + # Specifies the replica changes for scaling in components + scaleOut: + # Specifies the replica changes for the component. + # add one more replica to current component + replicaChanges: 1 + ``` + + Monitor the progress of the scaling operation: + + ```bash + kubectl get ops kafka-separated-cluster-scale-out-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + kafka-separated-cluster-scale-out-ops HorizontalScaling kafka-separated-cluster Running 0/1 9s + kafka-separated-cluster-scale-out-ops HorizontalScaling kafka-separated-cluster Running 1/1 16s + kafka-separated-cluster-scale-out-ops HorizontalScaling kafka-separated-cluster Succeed 1/1 16s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: kafka-broker + replicas: 2 # increase replicas to scale-out + ... + ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster kafka-separated-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/1/replicas", "value": 2}]' + ``` + + + +### Verify Scale-Out + +After applying the operation, you will see a new pod created and the Kafka cluster status goes from `Updating` to `Running`, and the newly created pod has a new role `secondary`. 
+ +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-separated-cluster,apps.kubeblocks.io/component-name=kafka-broker +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +kafka-separated-cluster-kafka-broker-0 2/2 Running 0 3m7s +kafka-separated-cluster-kafka-broker-1 2/2 Running 0 28s +``` + +## Scale-in (Remove Replicas) + +**Expected Workflow**: + +1. Selected replica (the one with the largest ordinal) is removed +3. Pod is terminated gracefully +4. Cluster status changes from `Updating` to `Running` + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale in the Kafka cluster by removing ONE replica: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-scale-in-ops + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: kafka-broker + # Specifies the replica changes for scaling in components + scaleIn: + # Specifies the replica changes for the component. 
+ # remove one replica from current component + replicaChanges: 1 + ``` + + Monitor progress: + ```bash + kubectl get ops kafka-separated-cluster-scale-in-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + kafka-separated-cluster-scale-in-ops HorizontalScaling kafka-separated-cluster Running 0/1 8s + kafka-separated-cluster-scale-in-ops HorizontalScaling kafka-separated-cluster Running 1/1 24s + kafka-separated-cluster-scale-in-ops HorizontalScaling kafka-separated-cluster Succeed 1/1 24s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: kafka-broker + replicas: 1 # decrease replicas to scale-in + ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster kafka-separated-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/1/replicas", "value": 1}]' + ``` + + + + +### Verify Scale-In + +Example Output (ONE Pod): +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-separated-cluster,apps.kubeblocks.io/component-name=kafka-broker +NAME READY STATUS RESTARTS AGE +kafka-separated-cluster-kafka-broker-0 2/2 Running 0 5m7s +``` + +## Best Practices + +When performing horizontal scaling: +- Scale during low-traffic periods when possible +- Monitor cluster health during scaling operations +- Verify sufficient resources exist before scaling out +- Consider storage requirements for new replicas + +## Cleanup +To remove all created resources, delete the Kafka cluster along with its namespace: +```bash +kubectl delete cluster kafka-separated-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide you learned how to: +- Perform scale-out operations to add replicas to a Kafka cluster. 
+- Perform scale-in operations to remove replicas from a Kafka cluster. +- Use both OpsRequest and direct Cluster API updates for horizontal scaling. + +KubeBlocks ensures seamless scaling with minimal disruption to your database operations. diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/04-volume-expansion.mdx b/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/04-volume-expansion.mdx new file mode 100644 index 00000000..4a00b964 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/04-volume-expansion.mdx @@ -0,0 +1,258 @@ +--- +title: Expanding Volume in a Kafka Cluster +description: Learn how to expand Persistent Volume Claims (PVCs) in a Kafka cluster managed by KubeBlocks without downtime. +keywords: [KubeBlocks, Kafka, Volume Expansion, Kubernetes, PVC] +sidebar_position: 4 +sidebar_label: Volume Expansion +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Expanding Volume in a Kafka Cluster + +This guide explains how to expand Persistent Volume Claims (PVCs) in a Kafka cluster managed by **KubeBlocks**. Volume expansion enables dynamic storage capacity increases, allowing your database to scale seamlessly as data grows. When supported by the underlying storage class, this operation can be performed without downtime. + +Volume expansion allows you to increase the size of a Persistent Volume Claim (PVC) after it has been created. This feature was introduced in Kubernetes v1.11 and became generally available (GA) in Kubernetes v1.24. 
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +### Check the Storage Class for Volume Expansion Support + +List all available storage classes and verify if volume expansion is supported by checking the `ALLOWVOLUMEEXPANSION` field: +```bash +kubectl get storageclass +``` + +Example Output: +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +Ensure the storage class you are using has `ALLOWVOLUMEEXPANSION` set to true. If it is false, the storage class does not support volume expansion. + +## Deploy a Kafka Cluster with StorageClass + +KubeBlocks uses a declarative approach to manage Kafka clusters. Below is an example configuration for deploying a Kafka cluster with 3 replicas. + +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: kafka-separated-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: kafka + topology: separated_monitor + componentSpecs: + - name: kafka-broker + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + env: + - name: KB_KAFKA_BROKER_HEAP + value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64" + - name: KB_KAFKA_CONTROLLER_HEAP + value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64" + - name: KB_BROKER_DIRECT_POD_ACCESS + value: "true" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: metadata + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + - name: kafka-controller + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: 
"0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: metadata + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + - name: kafka-exporter + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "1Gi" + requests: + cpu: "0.1" + memory: "0.2Gi" +``` + +**Explanation of Key Fields** +- `storageClassName`: Specifies `StorageClass` name that supports volume expansion. If not set, the StorageClass annotated `default` will be used. + +:::note +**ALLOWVOLUMEEXPANSION** + +Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`) when creating cluster. + +::: + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Expand volume + +:::note +1. Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`). +2. The new size must be larger than the current size. +3. Volume expansion may require additional configurations depending on the storage provider. +::: + +You can expand the volume in one of two ways: + + + + + Option 1: Using VolumeExpansion OpsRequest + + Apply the following YAML to increase the volume size for the kafka component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-expand-volume-ops + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: VolumeExpansion + volumeExpansion: + - componentName: kafka-broker + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + Monitor the expansion progress with: + + ```bash + kubectl describe ops kafka-separated-cluster-expand-volume-ops -n demo + ``` + + Expected Result: + ```bash + Status: + Phase: Succeed + ``` + Once completed, the PVC size will be updated. 
+ + :::note + If the storage class you use does not support volume expansion, this OpsRequest fails fast with information like: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update the `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` field to the desired size. + + ```yaml + componentSpecs: + - name: kafka-broker + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # specify new size, and make sure it is larger than current size + storage: 30Gi + ``` + KubeBlocks will automatically update the PVC size based on the new specifications. + + + +## Verification + +Verify the updated cluster configuration: +```bash +kbcli cluster describe kafka-separated-cluster -n demo +``` +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +kafka-broker 500m / 500m 512Mi / 512Mi data:30Gi +``` +The volume size for the data PVC has been updated to the specified value (e.g., 30Gi in this case). + +Confirm PVC resizing completion: +```bash +kubectl get pvc -l app.kubernetes.io/instance=kafka-separated-cluster,apps.kubeblocks.io/component-name=kafka-broker -n demo +``` +Expected Output: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS +data-kafka-separated-cluster-kafka-broker-0 Bound pvc-uuid 30Gi RWO +data-kafka-separated-cluster-kafka-broker-1 Bound pvc-uuid 30Gi RWO +``` + +## Cleanup +To remove all created resources, delete the Kafka cluster along with its namespace: +```bash +kubectl delete cluster kafka-separated-cluster -n demo +kubectl delete ns demo +``` + +## Summary + +In this guide you learned how to: +1. Verify storage class compatibility for volume expansion. +2. 
Perform volume expansion using either: + - OpsRequest for dynamic updates. + - Cluster API for manual updates. +3. Verify the updated PVC size and ensure the resize operation is complete. + +With volume expansion, you can efficiently scale your Kafka cluster's storage capacity without service interruptions, ensuring your database can grow alongside your application needs. + + diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/05-manage-loadbalancer.mdx b/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..2bd0f914 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,303 @@ +--- +title: Create and Destroy Kafka Service Using the Declarative Cluster API in KubeBlocks +description: Learn how to configure and manage Kafka services in KubeBlocks for external and internal access using LoadBalancer and other service types. +keywords: [KubeBlocks, Kafka, LoadBalancer, External Service, Expose, Kubernetes] +sidebar_position: 5 +sidebar_label: Manage Kafka Services +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Manage Kafka Services Using the Declarative Cluster API in KubeBlocks + +This guide provides step-by-step instructions for exposing Kafka services managed by KubeBlocks, both externally and internally. You'll learn to configure external access using cloud provider LoadBalancer services, manage internal services, and properly disable external exposure when no longer needed. 
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Kafka Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## View Network Services +List the Services created for the Kafka cluster: +```bash +kubectl get service -l app.kubernetes.io/instance=kafka-separated-cluster -n demo +``` + +Example Services: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kafka-separated-cluster-kafka-broker-advertised-listener-0 ClusterIP 10.96.101.247 9092/TCP 19m +``` + +## Expose Kafka Service + +External service addresses enable public internet access to Kafka, while internal service addresses restrict access to the user's VPC. + +### Service Types Comparison + +| Type | Use Case | Cloud Cost | Security | +|------|----------|------------|----------| +| ClusterIP | Internal service communication | Free | Highest | +| NodePort | Development/testing | Low | Moderate | +| LoadBalancer | Production external access | High | Managed via security groups | + + + + + + + Option 1: Using OpsRequest + + To expose the Kafka service externally using a LoadBalancer, create an OpsRequest resource: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: kafka-separated-cluster + expose: + - componentName: kafka-broker + services: + - name: internet + # Determines how the Service is exposed. Defaults to 'ClusterIP'. + # Valid options are 'ClusterIP', 'NodePort', and 'LoadBalancer'. + serviceType: LoadBalancer + ports: + - name: kafka-client + port: 9092 + targetPort: kafka-client + # Contains cloud provider related parameters if ServiceType is LoadBalancer. 
+ # Following is an example for AWS EKS + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + switch: Enable + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops kafka-separated-cluster-expose-enable-ops -n demo + ``` + + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + kafka-separated-cluster-expose-enable-ops Expose kafka-separated-cluster Succeed 1/1 31s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the `spec.services` section in the Cluster resource to include a LoadBalancer service: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: kafka-separated-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: kafka + # expose a external service + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + componentSelector: kafka-broker + name: kafka-internet + serviceName: kafka-internet + spec: # defines the behavior of a K8s service. + ipFamilyPolicy: PreferDualStack + ports: + - name: kafka-client + # port to expose + port: 9092 # port 15672 for kafka management console + protocol: TCP + targetPort: kafka-client + type: LoadBalancer + componentSpecs: + ... + ``` + The YAML configuration above adds a new external service under the services section. This LoadBalancer service includes annotations for AWS Network Load Balancer (NLB). + + :::note + Cloud Provider Annotations + + When using a LoadBalancer service, you must include the appropriate annotations specific to your cloud provider. 
Below is a list of commonly used annotations for different cloud providers: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # Restricts the LoadBalancer to internal VPC access only. Defaults to internet-facing if not specified. + cloud.google.com/l4-rbs: "enabled" # Optimization for internet-facing LoadBalancer + ``` + + - Alibaba Cloud + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # Use "intranet" for internal-facing LoadBalancer + ``` + ::: + + + :::note + The `service.beta.kubernetes.io/aws-load-balancer-internal` annotation controls whether the LoadBalancer is internal or internet-facing. Note that this annotation cannot be modified dynamically after service creation. + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # Use "true" for internal VPC IPs + ``` + If you change this annotation from "false" to "true" after the Service is created, the annotation may update in the Service object, but the LoadBalancer will still retain its public IP. + + To properly modify this behavior: + - First, delete the existing LoadBalancer service. + - Recreate the service with the updated annotation (`service.beta.kubernetes.io/aws-load-balancer-internal`: "true"). + - Wait for the new LoadBalancer to be provisioned with the correct internal or external IP. 
+ ::: + + + Wait for the Cluster status to transition to Running using the following command: + ```bash + kubectl get cluster kafka-separated-cluster -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + kafka-separated-cluster kafka Delete Running 18m + ``` + + + + +### Verify the Exposed Service +Check the service details to confirm the LoadBalancer service is created: + +```bash +kubectl get service -l app.kubernetes.io/instance=kafka-separated-cluster -n demo +``` + +Example Output: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kafka-separated-cluster-kafka-broker-advertised-listener-0 ClusterIP 10.96.101.247 9092/TCP 24m +kafka-separated-cluster-kafka-broker-internet LoadBalancer 10.96.180.189 9092:31243/TCP 59s +``` + +## Disable External Exposure + + + + + + Option 1: Using OpsRequest + + To disable external access, create an OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-expose-disable-ops + namespace: demo + spec: + clusterName: kafka-separated-cluster + expose: + - componentName: kafka-broker + services: + - name: internet + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops kafka-separated-cluster-expose-disable-ops -n demo + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + kafka-separated-cluster-expose-disable-ops Expose kafka-separated-cluster Succeed 1/1 16s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, remove the `spec.services` field from the Cluster resource: + ```bash + kubectl patch cluster kafka-separated-cluster -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + Monitor the cluster status until it is Running: + ```bash + kubectl get cluster kafka-separated-cluster -n demo -w + ``` + + ```bash + NAME CLUSTER-DEFINITION 
TERMINATION-POLICY STATUS AGE + kafka-separated-cluster kafka Delete Running 26m + ``` + + + +### Verify Service Removal + +Ensure that the 'kafka-separated-cluster-kafka-internet' Service is removed: + +```bash +kubectl get service -l app.kubernetes.io/instance=kafka-separated-cluster -n demo +``` + +Expected Result: The 'kafka-separated-cluster-kafka-internet' Service should be removed. + +## Cleanup +To remove all created resources, delete the Kafka cluster along with its namespace: +```bash +kubectl delete cluster kafka-separated-cluster -n demo +kubectl delete ns demo +``` + +## Summary +This guide demonstrated how to: +- Expose a Kafka service externally or internally using KubeBlocks. +- Configure LoadBalancer services with cloud provider-specific annotations. +- Manage external access by enabling or disabling services via OpsRequest or direct updates to the Cluster API. + +KubeBlocks provides flexibility and simplicity for managing MySQL services in Kubernetes environments. simplicity for managing Kafka services in Kubernetes environments. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/09-decommission-a-specific-replica.mdx b/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..5277ca1d --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,163 @@ +--- +title: Decommission a Specific Pod in KubeBlocks-Managed Kafka Clusters +description: Learn how to decommission (take offline) a specific Pod in a Kafka cluster managed by KubeBlocks. 
+keywords: [KubeBlocks, Kafka, Decommission Pod, Horizontal Scaling, Kubernetes] +sidebar_position: 9 +sidebar_label: Decommission Kafka Replica +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + +# Decommission a Specific Pod in KubeBlocks-Managed Kafka Clusters + +This guide explains how to decommission (take offline) specific Pods in Kafka clusters managed by KubeBlocks. Decommissioning provides precise control over cluster resources while maintaining availability. Use this for workload rebalancing, node maintenance, or addressing failures. + +## Why Decommission Pods with KubeBlocks? + +In traditional StatefulSet-based deployments, Kubernetes lacks the ability to decommission specific Pods. StatefulSets ensure the order and identity of Pods, and scaling down always removes the Pod with the highest ordinal number (e.g., scaling down from 3 replicas removes `Pod-2` first). This limitation prevents precise control over which Pod to take offline, which can complicate maintenance, workload distribution, or failure handling. + +KubeBlocks overcomes this limitation by enabling administrators to decommission specific Pods directly. This fine-grained control ensures high availability and allows better resource management without disrupting the entire cluster. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Kafka Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Decommission a Pod + +**Expected Workflow**: + +1. Replica specified in `onlineInstancesToOffline` is removed +2. Pod terminates gracefully +3. Cluster transitions from `Updating` to `Running` + + +Before decommissioning a specific pod from a component, make sure this component has more than one replicas. +If not, please scale out the component ahead. + +E.g. 
you can patch the cluster CR with the following command to declare 3 replicas in the kafka-broker component.
directly to decommission the Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: kafka-broker + replicas: 2 # explected replicas after decommission + offlineInstances: + - kafka-separated-cluster-kafka-broker-1 # <----- Specify Pod to be decommissioned + ... + ``` + + + + +### Verify the Decommissioning + +After applying the updated configuration, verify the remaining Pods in the cluster: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-separated-cluster,apps.kubeblocks.io/component-name=kafka-broker +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +kafka-separated-cluster-kafka-broker-0 2/2 Running 0 24m +kafka-separated-cluster-kafka-broker-2 2/2 Running 0 2m1s +``` + +## Summary +Key takeaways: +- Traditional StatefulSets lack precise Pod removal control +- KubeBlocks enables targeted Pod decommissioning +- Two implementation methods: OpsRequest or Cluster API + +This provides granular cluster management while maintaining availability. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/_category_.yml new file mode 100644 index 00000000..5c4d52e0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/04-operations/_category_.yml @@ -0,0 +1,4 @@ +position: 4 +label: Operations +collapsible: true +collapsed: false \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/en/release-1_0_1/kubeblocks-for-kafka/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..702778ab --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,248 @@ +--- +title: Observability for Kafka Clusters with the Prometheus Operator +description: Learn how to set up observability for Kafka Clusters in KubeBlocks using the Prometheus Operator. Configure monitoring and visualize metrics with Grafana. +keywords: [KubeBlocks, Kafka, Prometheus, Grafana, Observability, Metrics] +sidebar_position: 2 +sidebar_label: Observability for Kafka Clusters +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Kafka Monitoring with Prometheus Operator + +This guide demonstrates how to configure comprehensive monitoring for Kafka clusters in KubeBlocks using: + +1. Prometheus Operator for metrics collection +2. Built-in Kafka exporter for metrics exposure +3. Grafana for visualization + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Install Monitoring Stack + +### 1. 
Install Prometheus Operator +Deploy the kube-prometheus-stack using Helm: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. Verify Installation +Check all components are running: +```bash +kubectl get pods -n monitoring +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + + +## Deploy a Kafka Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Configure Metrics Collection + +### 1. Get Exporter details + +```bash +kubectl get po -n demo kafka-separated-cluster-kafka-broker-0 -oyaml | yq '.spec.containers[] | select(.name=="jmx-exporter") | .ports' +``` + +
+Example Output: + +```text +- containerPort: 5556 + name: metrics + protocol: TCP +``` + +
+ +```bash + kubectl get po -n demo kafka-separated-cluster-kafka-exporter-0 -oyaml | yq '.spec.containers[] | select(.name=="kafka-exporter") | .ports' +``` + +
+Example Output: + +```text +- containerPort: 9308 + name: metrics + protocol: TCP +``` +
+ +### 2. Verify Exporter Endpoint + +Check jmx-exporter: +```bash +kubectl -n demo exec -it pods/kafka-separated-cluster-kafka-broker-0 -- \ + curl -s http://127.0.0.1:5556/metrics | head -n 50 +``` + +Check kafka-exporter: + +```bash +kubectl -n demo exec -it pods/kafka-separated-cluster-kafka-broker-0 -- \ + curl -s http://kafka-separated-cluster-kafka-exporter-0.kafka-separated-cluster-kafka-exporter-headless.demo.svc:9308/metrics | head -n 50 +``` + +### 2. Create PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: kafka-jmx-pod-monitor + namespace: demo + labels: # match labels in `prometheus.spec.podMonitorSelector` + release: prometheus +spec: + jobLabel: app.kubernetes.io/managed-by + # defines the labels which are transferred from the + # associated Kubernetes `Pod` object onto the ingested metrics + # set the lables w.r.t you own needs + podTargetLabels: + - app.kubernetes.io/instance + - app.kubernetes.io/managed-by + - apps.kubeblocks.io/component-name + - apps.kubeblocks.io/pod-name + podMetricsEndpoints: + - path: /metrics + port: metrics + scheme: http + namespaceSelector: + matchNames: + - demo + selector: + matchLabels: + app.kubernetes.io/instance: kafka-separated-cluster +``` +**PodMonitor Configuration Guide** + +| Parameter | Required | Description | +|-----------|----------|-------------| +| `port` | Yes | Must match exporter port name ('http-metrics') | +| `namespaceSelector` | Yes | Targets namespace where Kafka runs | +| `labels` | Yes | Must match Prometheus's podMonitorSelector | +| `path` | No | Metrics endpoint path (default: /metrics) | +| `interval` | No | Scraping interval (default: 30s) | + + +## Verify Monitoring Setup + +### 1. 
Check Prometheus Targets +Forward and access Prometheus UI: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +Open your browser and navigate to: +http://localhost:9090/targets + +Check if there is a scrape job corresponding to the PodMonitor (the job name is 'demo/kafka-separated-cluster-pod-monitor'). + +Expected State: +- The State of the target should be UP. +- The target's labels should include the ones defined in podTargetLabels (e.g., 'app_kubernetes_io_instance'). + +### 2. Test Metrics Collection +Verify metrics are being scraped: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=up{app_kubernetes_io_instance="kafka-separated-cluster"}' | jq +``` + +Example Output: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "kafka-separated-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "kafka-broker", + "apps_kubeblocks_io_pod_name": "kafka-separated-cluster-kafka-broker-2", + "container": "jmx-exporter", + "endpoint": "metrics", + "instance": "10.244.0.236:5556", + "job": "kubeblocks", + "namespace": "demo", + "pod": "kafka-separated-cluster-kafka-broker-2" + }, + "value": [ + 1747654851.995, + "1" + ] + }, +... // more lines ommited +``` +## Visualize in Grafana + +### 1. Access Grafana +Port-forward and login: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +Open your browser and navigate to http://localhost:3000. Use the default credentials to log in: +- Username: 'admin' +- Password: 'prom-operator' (default) + +### 2. Import Dashboard +Import the KubeBlocks Kafka dashboard: + +1. In Grafana, navigate to "+" → "Import" +2. 
Import dashboard from [Grafana Kafka Dashboard](https://github.com/apecloud/kubeblocks-addons/tree/main/addons/kafka/dashboards) + +![kafka-jmx-monitoring-grafana-dashboard.png](/img/docs/en/kafka-jmx-monitoring-grafana-dashboard.png) +Figure 1. Kakfa jmx dashboard + + +![kafka-monitoring-grafana-dashboard.png](/img/docs/en/kafka-monitoring-grafana-dashboard.png) +Figure 2. Kafka exporter dashboard + + +## Delete +To delete all the created resources, run the following commands: +```bash +kubectl delete cluster kafka-separated-cluster -n demo +kubectl delete ns demo +kubectl delete podmonitor kafka-separated-cluster-pod-monitor -n demo +``` + +## Summary +In this tutorial, we set up observability for a Kafka cluster in KubeBlocks using the Prometheus Operator. +By configuring a `PodMonitor`, we enabled Prometheus to scrape metrics from the Kafka exporter. +Finally, we visualized these metrics in Grafana. This setup provides valuable insights for monitoring the health and performance of your Kafka databases. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/08-monitoring/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-kafka/08-monitoring/_category_.yml new file mode 100644 index 00000000..6953a6d9 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +position: 8 +label: Monitoring +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-kafka/_category_.yml new file mode 100644 index 00000000..687069c6 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/_category_.yml @@ -0,0 +1,4 @@ +position: 12 +label: KubeBlocks for Kafka Community Edition +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_category_.yml new file mode 100644 index 00000000..753acb79 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_category_.yml @@ -0,0 +1,5 @@ +position: 100 +label: tpl +collapsible: true +collapsed: false +hidden: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_create-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_create-cluster.mdx new file mode 100644 index 00000000..0e82fc26 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_create-cluster.mdx @@ -0,0 +1,84 @@ +KubeBlocks uses a declarative approach for managing Kafka Clusters. 
Below is an example configuration for deploying a Kafka Cluster with 3 components.
+ +::: \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_prerequisites.mdx b/docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..e632dc41 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_verify-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_verify-cluster.mdx new file mode 100644 index 00000000..b94f7513 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-kafka/_tpl/_verify-cluster.mdx @@ -0,0 +1,33 @@ +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster kafka-separated-cluster -n demo -w +``` + +Expected Output: + +```bash +kubectl get cluster kafka-separated-cluster -n demo +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +kafka-separated-cluster kafka Delete Creating 13s +kafka-separated-cluster kafka Delete Running 63s +``` + +Check the pod status and roles: +```bash +kubectl get pods -l app.kubernetes.io/instance=kafka-separated-cluster -n demo +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +kafka-separated-cluster-kafka-broker-0 2/2 Running 0 13m +kafka-separated-cluster-kafka-controller-0 2/2 Running 0 13m +kafka-separated-cluster-kafka-exporter-0 1/1 Running 0 12m +``` + +Once the cluster status becomes 
Running, your Kafka cluster is ready for use. + +:::tip +If you are creating the cluster for the very first time, it may take some time to pull images before running. + +::: diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/01-overview.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/01-overview.mdx new file mode 100644 index 00000000..38990d45 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/01-overview.mdx @@ -0,0 +1,84 @@ +--- +title: Overview of KubeBlocks Milvus Addon +description: Learn about the features and capabilities of the KubeBlocks Milvus addon, including deployment topologies, lifecycle management, backup and restore, and supported versions. +keywords: [Milvus, KubeBlocks, operator, database, features, lifecycle management, backup, restore] +sidebar_position: 1 +sidebar_label: Overview +--- + +# Overview of KubeBlocks Milvus Addon + +Milvus is an open source (Apache-2.0 licensed) vector database built to power embedding similarity search and AI applications. Milvus's architecture is designed to handle large-scale vector datasets and includes various deployment modes: Milvus Standalone, and Milvus Distributed, to accommodate different data scale needs. 
+ +## Key Features + +### Supported Topologies + +Milvus supports two deployment modes to accommodate different scale requirements: + +#### Standalone Mode + +A lightweight deployment suitable for development and testing: + +- **Milvus Core**: Provides vector search and database functionality +- **Metadata Storage (ETCD)**: Stores cluster metadata and configuration +- **Object Storage (MinIO/S3)**: Persists vector data and indexes + +#### Cluster Mode + +A distributed deployment for production workloads with multiple specialized components: + +**Access Layer** + +- Stateless proxies that handle client connections and request routing + +**Compute Layer** + +- Query Nodes: Execute search operations +- Data Nodes: Handle data ingestion and compaction +- Index Nodes: Build and maintain vector indexes + +**Coordination Layer** + +- Root Coordinator: Manages global metadata +- Query Coordinator: Orchestrates query execution +- Data Coordinator: Manages data distribution +- Index Coordinator: Oversees index building + +**Storage Layer** + +- Metadata Storage (ETCD): Cluster metadata and configuration +- Object Storage (MinIO/S3): Persistent vector data storage +- Log Storage (Pulsar): Message queue for change data capture + + +### Lifecycle Management + +KubeBlocks simplifies Milvus operations with comprehensive lifecycle management: + +| Feature | Description | +|------------------------------|-----------------------------------------------------------------------------| +| **Horizontal Scaling** | Scale replicas in/out to adjust capacity | +| **Vertical Scaling** | Adjust CPU/memory resources for Milvus instances | +| **Restart Operations** | Controlled cluster restarts with minimal disruption | +| **Start/Stop** | Temporarily suspend/resume cluster operations | +| **Custom Services** | Expose specialized database endpoints | +| **Replica Management** | Safely decommission or rebuild specific replicas | +| **Version Upgrades** | Perform minor version upgrades seamlessly 
| +| **Advanced Scheduling** | Customize pod placement and resource allocation | +| **Monitoring** | Integrated Prometheus metrics collection | +| **Logging** | Centralized logs via Loki Stack | + +### Supported Versions + +KubeBlocks Milvus Addon supports these Milvus versions: + +| Major Version | Supported Minor Versions | +|---------------|--------------------------------| +| 2.3 | 2.3.2 | + + +The list of supported versions can be found by following command: +```bash +kubectl get cmpv milvus +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/02-quickstart.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/02-quickstart.mdx new file mode 100644 index 00000000..137ce338 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/02-quickstart.mdx @@ -0,0 +1,436 @@ +--- +title: Milvus Quickstart +description: Comprehensive guide to deploying and managing Milvus ReplicaSet Clusters with KubeBlocks, including installation, configuration, and operational best practices,an alternative to dedicated operator. 
+keywords: [Kubernetes Operator, Milvus, KubeBlocks, Helm, Cluster Management, QuickStart] +sidebar_position: 2 +sidebar_label: Quickstart +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Milvus Quickstart + +This guide provides a comprehensive walkabout for deploying and managing Milvus ReplicaSet Clusters using the **KubeBlocks Milvus Add-on**, covering: +- System prerequisites and add-on installation +- Cluster creation and configuration +- Operational management including start/stop procedures +- Connection methods and cluster monitoring + +## Prerequisites + +### System Requirements + +Before proceeding, verify your environment meets these requirements: + +- A functional Kubernetes cluster (v1.21+ recommended) +- `kubectl` v1.21+ installed and configured with cluster access +- Helm installed ([installation guide](https://helm.sh/docs/intro/install/)) +- KubeBlocks installed ([installation guide](../user_docs/overview/install-kubeblocks)) + +### Verify Milvus Add-on + +The Milvus Add-on is included with KubeBlocks by default. Check its status: + +```bash +helm list -n kb-system | grep milvus +``` + +
+Example Output: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-milvus kb-system 1 2025-05-21 deployed milvus-1.0.0 +``` +
+ +If the add-on isn't enabled, choose an installation method: + + + + + ```bash + # Add Helm repo + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # For users in Mainland China, if GitHub is inaccessible or slow, use this alternative repo: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # Update helm repo + helm repo update + # Search available Add-on versions + helm search repo kubeblocks/milvus --versions + # Install your desired version (replace with your chosen version) + helm upgrade -i kb-addon-milvus kubeblocks-addons/milvus --version -n kb-system + ``` + + + + + ```bash + # Add an index (kubeblocks is added by default) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # Update the index + kbcli addon index update kubeblocks + # Update all indexes + kbcli addon index update --all + ``` + + To search and install an addon: + + ```bash + # Search Add-on + kbcli addon search milvus + # Install Add-on with your desired version (replace with your chosen version) + kbcli addon install milvus --version + ``` + **Example Output:** + ```bash + ADDON VERSION INDEX + milvus 0.9.0 kubeblocks + milvus 0.9.1 kubeblocks + milvus 1.0.0 kubeblocks + ``` + To enable or disable an addon: + + ```bash + # Enable Add-on + kbcli addon enable milvus + # Disable Add-on + kbcli addon disable milvus + ``` + + + + +:::note +**Version Compatibility** + +Always verify that the Milvus Add-on version matches your KubeBlocks major version to avoid compatibility issues. + +::: + +### Verify Supported Milvus Versions + +**List available Milvus versions:** + +```bash +kubectl get cmpv milvus +``` +
+Example Output +```text +NAME VERSIONS STATUS AGE +milvus v2.3.2 Available 26d +``` +
+ +### Storage Configuration + +Milvus requires persistent storage. Verify available options: + +```bash +kubectl get storageclass +``` + +Recommended storage characteristics: +- Minimum 20Gi capacity +- ReadWriteOnce access mode +- Supports volume expansion +- Appropriate performance for workload + +## Deploy a Milvus Cluster + +Deploy a basic Milvus Cluster with default settings: + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/milvus/cluster-standalone.yaml +``` + +This creates: +- A Milvus Cluster with 3 replicas, one for milvus, one for etcd and one for minio. +- Default resource allocations (0.5 CPU, 0.5Gi memory) +- 20Gi persistent storage + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: milvus-standalone + namespace: demo +spec: + # Specifies the behavior when a Cluster is deleted. + # Valid options are: [DoNotTerminate, Delete, WipeOut] (`Halt` is deprecated since KB 0.9) + # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. + # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. + # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. + terminationPolicy: Delete + # Specifies the name of the ClusterDefinition to use when creating a Cluster. + # Note: DO NOT UPDATE THIS FIELD + # The value must be `milvus` to create a Milvus Cluster + clusterDef: milvus + # Specifies the name of the ClusterTopology to be used when creating the + # Cluster. 
+ # Valid options are: [standalone,cluster] + topology: standalone + # Specifies a list of ClusterComponentSpec objects used to define the + # individual Components that make up a Cluster. + # This field allows for detailed configuration of each Component within the Cluster + componentSpecs: + - name: etcd + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: minio + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: milvus + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +For more API fields and descriptions, refer to the [API Reference](../user_docs/references/api-reference/cluster). + +## Verify Cluster Status + +When deploying a Milvus Cluster with 3 replicas: + +Confirm successful deployment by checking: + +1. Cluster phase is `Running` +2. 
All pods are operational + +Check status using either method: + + + +```bash +kubectl get cluster milvus-standalone -n demo -w +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +milvus-standalone milvus Delete Creating 27s +milvus-standalone milvus Delete Running 64s + +kubectl get pods -l app.kubernetes.io/instance=milvus-standalone -n demo +NAME READY STATUS RESTARTS AGE +milvus-standalone-etcd-0 2/2 Running 0 25m +milvus-standalone-milvus-0 1/1 Running 0 24m +milvus-standalone-minio-0 1/1 Running 0 25m +``` + + + + + With `kbcli` installed, you can view comprehensive cluster information: + +```bash +kbcli cluster describe milvus-standalone -n demo + +Name: milvus-standalone Created Time: May 19,2025 11:03 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo milvus standalone Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +etcd 3.5.15 milvus-standalone-etcd-0 leader Running zone-x x.y.z May 19,2025 11:03 UTC+0800 +milvus v2.3.2 milvus-standalone-milvus-0 Running zone-x x.y.z May 19,2025 11:04 UTC+0800 +minio 8.0.17 milvus-standalone-minio-0 Running zone-x x.y.z May 19,2025 11:03 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +etcd 500m / 500m 512Mi / 512Mi data:10Gi +minio 500m / 500m 512Mi / 512Mi data:10Gi +milvus 500m / 500m 512Mi / 512Mi data:10Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +etcd etcd-3-1.0.0 quay.io/coreos/etcd:v3.5.15 +minio milvus-minio-1.0.0 docker.io/minio/minio:RELEASE.2022-03-17T06-34-49Z +milvus milvus-standalone-1.0.0 docker.io/milvusdb/milvus:v2.3.2 + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +Show cluster events: kbcli cluster list-events -n demo milvus-standalone +``` + + + + +## Access Milvus + +To access the Milvus service, you can expose the service by 
creating a service: + +```bash +kubectl port-forward pod/milvus-standalone-milvus-0 -n demo 19530:19530 +``` + +And then you can access the Milvus service via `localhost:19530`. + +## Stop the Milvus Cluster + +Stopping a cluster temporarily suspends operations while preserving all data and configuration: + +**Key Effects:** +- Compute resources (Pods) are released +- Persistent storage (PVCs) remains intact +- Service definitions are maintained +- Cluster configuration is preserved +- Operational costs are reduced + + + + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-stop + namespace: demo + spec: + clusterName: milvus-standalone + type: Stop + ``` + + + + Alternatively, stop by setting `spec.componentSpecs.stop` to true: + + ```bash + kubectl patch cluster milvus-standalone -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/1/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/2/stop", + "value": true + } + ]' + ``` + + + +## Start the Milvus Cluster + +Restarting a stopped cluster resumes operations with all data and configuration intact. 
+ +**Key Effects:** +- Compute resources (Pods) are recreated +- Services become available again +- Cluster returns to previous state + + + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-start + namespace: demo + spec: + clusterName: milvus-standalone + type: Start + ``` + + + + Restart by setting `spec.componentSpecs.stop` to false: + + ```bash + kubectl patch cluster milvus-standalone -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/1/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/2/stop" + } + ]' + ``` + + + + +## Delete Milvus Cluster + +Choose carefully based on your data retention needs: + +| Policy | Resources Removed | Data Removed | Recommended For | +|-----------------|-------------------|--------------|-----------------| +| DoNotTerminate | None | None | Critical production clusters | +| Delete | All resources | PVCs deleted | Non-critical environments | +| WipeOut | All resources | Everything* | Test environments only | + +*Includes snapshots and backups in external storage + +**Pre-Deletion Checklist:** +1. Verify no applications are using the cluster +2. Ensure required backups exist +3. Confirm proper terminationPolicy is set +4. 
Check for dependent resources + +For test environments, use this complete cleanup: + +```bash +kubectl patch cluster milvus-standalone -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster milvus-standalone -n demo +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/03-topologies/01-standlone.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/03-topologies/01-standlone.mdx new file mode 100644 index 00000000..6e2cc9ad --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/03-topologies/01-standlone.mdx @@ -0,0 +1,135 @@ +--- +title: Deploying a Milvus Standalone Cluster with KubeBlocks +description: Learn how to deploy a Milvus Standalone Cluster using KubeBlocks. This guide covers configuration, verification, failover testing, and timeout configuration. +keywords: [KubeBlocks, Redis, Kubernetes, High Availability] +sidebar_position: 1 +sidebar_label: Milvus Standalone Cluster +--- + +# Deploying a Milvus Standalone Cluster with KubeBlocks + +Standalone is a lightweight deployment suitable for development and testing with following components: + +- **Milvus Core**: Provides vector search and database functionality +- **Metadata Storage (ETCD)**: Stores cluster metadata and configuration +- **Object Storage (MinIO/S3)**: Persists vector data and indexes + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploying the Milvus Standalone Cluster + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: milvus-standalone + namespace: demo +spec: + terminationPolicy: Delete + # The value must be `milvus` to create a Milvus Cluster + clusterDef: milvus + # Valid options are: [standalone,cluster] + topology: standalone + componentSpecs: + - name: etcd + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - 
ReadWriteOnce + resources: + requests: + storage: 10Gi + - name: minio + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + - name: milvus + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi +``` + +**Key Configuration Details**: +- `clusterDef: milvus`: Specifies the ClusterDefinition CR for the cluster. +- `topology: standalone`: Configures the cluster to use standalone topology. +- `componentSpecs`: Defines the components in the cluster + + +## Verifying the Deployment + +### Check the Cluster Status +Once the cluster is deployed, check its status: +```bash +kubectl get cluster milvus-standalone -n demo -w +``` +Expected Output: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +milvus-standalone milvus Delete Creating 40s +milvus-standalone milvus Delete Creating 71s +milvus-standalone milvus Delete Creating 71s +milvus-standalone milvus Delete Updating 71s +milvus-standalone milvus Delete Running 2m55s +``` + +### Verify Component Status +```bash +kubectl get component -n demo -l app.kubernetes.io/instance=milvus-standalone +``` +Expected Output: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +milvus-standalone-etcd etcd-3-1.0.0 3.5.15 Running 3m5s +milvus-standalone-milvus milvus-standalone-1.0.0 v2.3.2 Running 114s +milvus-standalone-minio milvus-minio-1.0.0 8.0.17 Running 3m5s +``` + +## Cleanup +To remove all resources created during this tutorial: + +```bash +kubectl delete cluster milvus-standalone -n demo +kubectl delete ns demo +``` \ No newline at end of file diff --git 
a/docs/en/release-1_0_1/kubeblocks-for-milvus/03-topologies/02-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/03-topologies/02-cluster.mdx
new file mode 100644
index 00000000..d74933c9
--- /dev/null
+++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/03-topologies/02-cluster.mdx
@@ -0,0 +1,522 @@
+---
+title: Deploying a Milvus Cluster with KubeBlocks
+description: Learn how to deploy a Milvus Cluster using KubeBlocks. This guide covers deploying the dependent etcd, MinIO, and Pulsar clusters, configuring service references, and verifying the deployment.
+keywords: [KubeBlocks, Milvus, Kubernetes, High Availability]
+sidebar_position: 2
+sidebar_label: Milvus Cluster
+---
+
+# Deploying a Milvus Cluster with KubeBlocks
+
+Milvus Cluster is a distributed deployment for production workloads with multiple specialized components:
+
+**Access Layer**
+
+- Stateless proxies that handle client connections and request routing
+
+**Compute Layer**
+
+- Query Nodes: Execute search operations
+- Data Nodes: Handle data ingestion and compaction
+- Index Nodes: Build and maintain vector indexes
+
+**Coordination Layer**
+
+- Root Coordinator: Manages global metadata
+- Query Coordinator: Orchestrates query execution
+- Data Coordinator: Manages data distribution
+- Index Coordinator: Oversees index building
+
+**Storage Layer**
+
+- Metadata Storage (ETCD): Cluster metadata and configuration
+- Object Storage (MinIO/S3): Persistent vector data storage
+- Log Storage (Pulsar): Message queue for change data capture
+
+## Prerequisites
+
+import Prerequisites from '../_tpl/_prerequisites.mdx'
+
+<Prerequisites />
+
+## Deploying the Milvus Cluster
+
+### Step 1. 
Deploy an ETCD Cluster + +ETCD cluster is for metadata storage + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: etcdm-cluster + namespace: demo +spec: + terminationPolicy: WipeOut + componentSpecs: + - name: etcd + componentDef: etcd-3-1.0.0 + serviceVersion: 3.5.6 + replicas: 1 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### Step 2. Deploy a minio Cluster + +Minio is for object storage +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: miniom-cluster + namespace: demo +spec: + terminationPolicy: WipeOut + componentSpecs: + - name: minio + componentDef: milvus-minio-1.0.0 + replicas: 1 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### Step 3. 
Deploy a Pulsar Cluster + +Pulsar is for log storage +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pulsarm-cluster + namespace: demo +spec: + terminationPolicy: Delete + # The value must be `pulsar` to create a Pulsar Cluster + clusterDef: pulsar + topology: pulsar-basic-cluster + services: + - name: broker-bootstrap + serviceName: broker-bootstrap + componentSelector: broker + spec: + type: ClusterIP + ports: + - name: pulsar + port: 6650 + targetPort: 6650 + - name: http + port: 80 + targetPort: 8080 + - name: kafka-client + port: 9092 + targetPort: 9092 + - name: zookeeper + serviceName: zookeeper + componentSelector: zookeeper + spec: + type: ClusterIP + ports: + - name: client + port: 2181 + targetPort: 2181 + componentSpecs: + - name: broker + serviceVersion: 3.0.2 + replicas: 1 + env: + - name: KB_PULSAR_BROKER_NODEPORT + value: "false" + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + - name: bookies + serviceVersion: 3.0.2 + replicas: 4 + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + volumeClaimTemplates: + - name: ledgers + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + - name: journal + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + - name: zookeeper + serviceVersion: 3.0.2 + replicas: 1 + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "100m" + memory: "512Mi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi +``` + + +### Deploy a Milvus Cluster + +The cluster will be created with the following components: + +- Proxy +- Data Node +- Index Node +- Query Node +- Mixed Coordinator + +And each component will be created with `serviceRef` to the corresponding service: etcd, minio, and pulsar created previously. 
+ +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + namespace: demo + name: milvus-cluster +spec: + terminationPolicy: Delete + # The value must be `milvus` to create a Milvus Cluster + clusterDef: milvus + # Valid options are: [standalone,cluster] + topology: cluster + componentSpecs: + - name: proxy + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + # Defines a list of ServiceRef for a Component + serviceRefs: + - name: milvus-meta-storage # Specifies the identifier of the service reference declaration, defined in `componentDefinition.spec.serviceRefDeclarations[*].name` + namespace: demo # namepspace of referee cluster, update on demand + # References a service provided by another KubeBlocks Cluster + clusterServiceSelector: + cluster: etcdm-cluster # ETCD Cluster Name, update the cluster name on demand + service: + component: etcd # component name, should be etcd + service: headless # Refer to default headless Service + port: client # Refer to port name 'client' + - name: milvus-log-storage + namespace: demo + clusterServiceSelector: + cluster: pulsarm-cluster # Pulsar Cluster Name + service: + component: broker + service: headless + port: pulsar + - name: milvus-object-storage + namespace: demo + clusterServiceSelector: + cluster: miniom-cluster # Minio Cluster Name + service: + component: minio + service: headless + port: http + credential: # Specifies the SystemAccount to authenticate and establish a connection with the referenced Cluster. 
+ component: minio # for component 'minio' + name: admin # the name of the credential (SystemAccount) to reference, using account 'admin' in this case + - name: mixcoord + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + serviceRefs: + - name: milvus-meta-storage + namespace: demo + clusterServiceSelector: + cluster: etcdm-cluster + service: + component: etcd + service: headless + port: client + - name: milvus-log-storage + namespace: demo + clusterServiceSelector: + cluster: pulsarm-cluster + service: + component: broker + service: headless + port: pulsar + - name: milvus-object-storage + namespace: demo + clusterServiceSelector: + cluster: miniom-cluster + service: + component: minio + service: headless + port: http + credential: + component: minio + name: admin + - name: datanode + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + serviceRefs: + - name: milvus-meta-storage + namespace: demo + clusterServiceSelector: + cluster: etcdm-cluster + service: + component: etcd + service: headless + port: client + - name: milvus-log-storage + namespace: demo + clusterServiceSelector: + cluster: pulsarm-cluster + service: + component: broker + service: headless + port: pulsar + - name: milvus-object-storage + namespace: demo + clusterServiceSelector: + cluster: miniom-cluster + service: + component: minio + service: headless + port: http + credential: + component: minio + name: admin + - name: indexnode + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + serviceRefs: + - name: milvus-meta-storage + namespace: demo + clusterServiceSelector: + cluster: etcdm-cluster + service: + component: etcd + service: headless + port: client + - name: milvus-log-storage + namespace: demo + clusterServiceSelector: + cluster: pulsarm-cluster + service: + component: broker + service: headless + port: pulsar + 
- name: milvus-object-storage + namespace: demo + clusterServiceSelector: + cluster: miniom-cluster + service: + component: minio + service: headless + port: http + credential: + component: minio + name: admin + - name: querynode + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + serviceRefs: + - name: milvus-meta-storage + namespace: demo + clusterServiceSelector: + cluster: etcdm-cluster + service: + component: etcd + service: headless + port: client + - name: milvus-log-storage + namespace: demo + clusterServiceSelector: + cluster: pulsarm-cluster + service: + component: broker + service: headless + port: pulsar + - name: milvus-object-storage + namespace: demo + clusterServiceSelector: + cluster: miniom-cluster + service: + component: minio + service: headless + port: http + credential: + component: minio + name: admin +``` + +:::note + +Clusters, such as Pulsar, Minio and ETCD, have multiple ports for different services. +When creating Cluster with `serviceRef`, you should know which `port` providing corresponding services. +For instance, in MinIO, there are mainly four ports: 9000, 9001, 3501, and 3502, and they are used for different services or functions. + +::: + + +Service References are specified using `serviceRefs` as follows, please update cluster names and ports accordingly w.r.t your running environment. 
+```yaml
+# Defines a list of ServiceRef for a Component
+serviceRefs:
+  - name: milvus-meta-storage # Specifies the identifier of the service reference declaration, defined in `componentDefinition.spec.serviceRefDeclarations[*].name`
+    namespace: demo # namespace of the referenced Cluster, update on demand
+    # References a service provided by another KubeBlocks Cluster
+    clusterServiceSelector:
+      cluster: etcdm-cluster # ETCD Cluster Name, update the cluster name on demand
+      service:
+        component: etcd # component name, should be etcd
+        service: headless # Refer to default headless Service
+        port: client # NOTE: Refer to port name 'client', for port number '3501'
+  - name: milvus-log-storage
+    namespace: demo
+    clusterServiceSelector:
+      cluster: pulsarm-cluster # Pulsar Cluster Name
+      service:
+        component: broker
+        service: headless
+        port: pulsar # NOTE: Refer to port name 'pulsar', for port number '6650'
+  - name: milvus-object-storage
+    namespace: demo
+    clusterServiceSelector:
+      cluster: miniom-cluster # Minio Cluster Name
+      service:
+        component: minio
+        service: headless
+        port: http # NOTE: Refer to port name 'http', for port number '9000'
+    credential: # Specifies the SystemAccount to authenticate and establish a connection with the referenced Cluster. 
+ component: minio # for component 'minio' + name: admin # NOTE: the name of the credential (SystemAccount) to reference, using account 'admin' in this case +``` + + + +## Verifying the Deployment + +### Check the Cluster Status +Once the cluster is deployed, check its status: +```bash +kubectl get cluster milvus-cluster -n demo -w +``` +Expected Output: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +milvus-cluster milvus Delete Running 4m38s +``` + +### Verify Component and Pod Status +```bash +kubectl get component -l app.kubernetes.io/instance=milvus-cluster -n demo +``` +Expected Output: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +milvus-cluster-datanode milvus-datanode-1.0.0 v2.3.2 Running 5m8s +milvus-cluster-indexnode milvus-indexnode-1.0.0 v2.3.2 Running 5m8s +milvus-cluster-mixcoord milvus-mixcoord-1.0.0 v2.3.2 Running 5m8s +milvus-cluster-proxy milvus-proxy-1.0.0 v2.3.2 Running 5m8s +milvus-cluster-querynode milvus-querynode-1.0.0 v2.3.2 Running 5m8s +``` + +Check pods: + +```bash +kubectl get pods -l app.kubernetes.io/instance=milvus-cluster -n demo +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +milvus-cluster-datanode-0 1/1 Running 0 5m30s +milvus-cluster-indexnode-0 1/1 Running 0 5m31s +milvus-cluster-mixcoord-0 1/1 Running 0 5m32s +milvus-cluster-proxy-0 1/1 Running 0 5m32s +milvus-cluster-querynode-0 1/1 Running 0 5m31s +milvus-cluster-querynode-1 1/1 Running 0 3m51s +``` + +## Cleanup +To remove all resources created during this tutorial: + +```bash +kubectl delete cluster milvus-cluster -n demo +kubectl delete cluster etcdm-cluster -n demo +kubectl delete cluster miniom-cluster -n demo +kubectl delete cluster pulsarm--cluster -n demo +kubectl delete ns demo +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/03-topologies/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-milvus/03-topologies/_category_.yml new file mode 100644 index 00000000..f041cfad --- 
/dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/03-topologies/_category_.yml @@ -0,0 +1,4 @@ +position: 3 +label: Topologies +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/01-stop-start-restart.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..8ac05749 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,257 @@ +--- +title: Milvus Cluster Lifecycle Management (Stop, Start, Restart) +description: Learn how to manage Milvus Cluster states in KubeBlocks including stopping, starting, and restarting operations to optimize resources. +keywords: [KubeBlocks, Milvus, Cluster Management, Stop, Start, Restart] +sidebar_position: 1 +sidebar_label: Lifecycle Management +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Milvus Cluster Lifecycle Management + +This guide demonstrates how to manage a Milvus Cluster's operational state in **KubeBlocks**, including: + +- Stopping the cluster to conserve resources +- Starting a stopped cluster +- Restarting cluster components + +These operations help optimize resource usage and reduce operational costs in Kubernetes environments. + +Lifecycle management operations in KubeBlocks: + +| Operation | Effect | Use Case | +|-----------|--------|----------| +| Stop | Suspends cluster, retains storage | Cost savings, maintenance | +| Start | Resumes cluster operation | Restore service after pause | +| Restart | Recreates pods for component | Configuration changes, troubleshooting | + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Milvus Cluster + +Please refer to [Deploying a Milvus Cluster with KubeBlocks](../03-topologies/02-cluster) to deploy a milvus cluster. 
+ +## Cluster Lifecycle Operations + +### Stopping the Cluster + +Stopping a Milvus Cluster in KubeBlocks will: + +1. Terminates all running pods +2. Maintains cluster configuration + +This operation is ideal for: +- Temporary cost savings +- Maintenance windows +- Development environment pauses + + + + + +Option 1: OpsRequest API + +Create a Stop operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: milvus-cluster-stop-ops + namespace: demo +spec: + clusterName: milvus-cluster + type: Stop +``` + + + + +Option 2: Cluster API + +Create a Stop operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: milvus-cluster-stop-ops + namespace: demo +spec: + clusterName: milvus-cluster + type: Stop +``` + + + +### Verifying Cluster Stop + +To confirm a successful stop operation: + +1. Check cluster status transition: + ```bash + kubectl get cluster milvus-cluster -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + milvus-cluster milvus Delete Stopping 6m33s + milvus-cluster milvus Delete Stopped 6m55s + ``` + +2. Verify no running pods: + ```bash + kubectl get pods -l app.kubernetes.io/instance=milvus-cluster -n demo + ``` + Example Output: + ```bash + No resources found in demo namespace. + ``` + +### Starting the Cluster + +Starting a stopped Milvus Cluster: +1. Recreates all pods +3. Restores service endpoints + +Expected behavior: +- Cluster returns to previous state +- No data loss occurs +- Services resume automatically + + + + +Initiate a Start operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: milvus-cluster-start-ops + namespace: demo +spec: + # Specifies the name of the Cluster resource that this operation is targeting. 
+ clusterName: milvus-cluster + type: Start +``` + + + +### Verifying Cluster Start + +To confirm a successful start operation: + +1. Check cluster status transition: + ```bash + kubectl get cluster milvus-cluster -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + milvus-cluster milvus Delete Updating 30m + milvus-cluster milvus Delete Updating 32m + milvus-cluster milvus Delete Updating 32m + milvus-cluster milvus Delete Running 33m + milvus-cluster milvus Delete Running 33m + ``` + +2. Verify pod recreation: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=milvus-cluster + ``` + Example Output: + ```bash + NAME READY STATUS RESTARTS AGE + milvus-cluster-datanode-0 1/1 Running 0 5m24s + milvus-cluster-indexnode-0 1/1 Running 0 5m24s + milvus-cluster-mixcoord-0 1/1 Running 0 5m24s + milvus-cluster-proxy-0 1/1 Running 0 5m24s + milvus-cluster-querynode-0 1/1 Running 0 5m24s + milvus-cluster-querynode-1 1/1 Running 0 3m43s + ``` + +### Restarting Cluster + +Restart operations provide: +- Pod recreation without full cluster stop +- Component-level granularity +- Minimal service disruption + +Use cases: +- Configuration changes requiring restart +- Resource refresh +- Troubleshooting + +**Check Components** + +There are five components in Milvus Cluster. To get the list of components, +```bash +kubectl get cluster -n demo milvus-cluster -oyaml | yq '.spec.componentSpecs[].name' +``` + +Expected Output: +```text +proxy +mixcoord +datanode +indexnode +querynode +``` + + +**Restart Proxy via OpsRequest API** + +List specific components to be restarted: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: milvus-cluster-restart-ops + namespace: demo +spec: + clusterName: milvus-cluster + type: Restart + restart: + - componentName: proxy +``` + +**Verifying Restart Completion** + +To verify a successful component restart: + +1. 
Track OpsRequest progress:
+   ```bash
+   kubectl get opsrequest milvus-cluster-restart-ops -n demo -w
+   ```
+   Example Output:
+   ```bash
+   NAME                         TYPE      CLUSTER          STATUS    PROGRESS   AGE
+   milvus-cluster-restart-ops   Restart   milvus-cluster   Running   0/1        4s
+   milvus-cluster-restart-ops   Restart   milvus-cluster   Running   1/1        2m12s
+   milvus-cluster-restart-ops   Restart   milvus-cluster   Running   1/1        2m12s
+   milvus-cluster-restart-ops   Restart   milvus-cluster   Succeed   1/1        2m12s
+   ```
+
+2. Check pod status:
+   ```bash
+   kubectl get pods -n demo -l app.kubernetes.io/instance=milvus-cluster
+   ```
+   Note: Pods will show new creation timestamps after restart. Only pods belonging to the `proxy` component have been restarted.
+
+Once the operation is complete, the cluster will return to the Running state.
+
+## Summary
+In this guide, you learned how to:
+1. Stop a Milvus Cluster to suspend operations while retaining persistent storage.
+2. Start a stopped cluster to bring it back online.
+3. Restart specific cluster components to recreate their Pods without stopping the entire cluster.
+
+By managing the lifecycle of your Milvus Cluster, you can optimize resource utilization, reduce costs, and maintain flexibility in your Kubernetes environment. KubeBlocks provides a seamless way to perform these operations, ensuring high availability and minimal disruption.
diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/02-vertical-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/02-vertical-scaling.mdx
new file mode 100644
index 00000000..b7e71398
--- /dev/null
+++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/02-vertical-scaling.mdx
@@ -0,0 +1,189 @@
+---
+title: Vertical Scaling in a Milvus Cluster
+description: Learn how to perform vertical scaling in a Milvus Cluster managed by KubeBlocks to optimize resource utilization and improve performance. 
+keywords: [KubeBlocks, Milvus, Vertical Scaling, Kubernetes, Resources] +sidebar_position: 2 +sidebar_label: Vertical Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Vertical Scaling for Milvus Standalone Clusters with KubeBlocks + +This guide demonstrates how to vertically scale a Milvus Cluster managed by KubeBlocks by adjusting compute resources (CPU and memory) while maintaining the same number of replicas. + +Vertical scaling modifies compute resources (CPU and memory) for Milvus instances while maintaining replica count. Key characteristics: + +- **Non-disruptive**: When properly configured, maintains availability during scaling +- **Granular**: Adjust CPU, memory, or both independently +- **Reversible**: Scale up or down as needed + +KubeBlocks ensures minimal impact during scaling operations by following a controlled, role-aware update strategy: +**Role-Aware Replicas (Primary/Secondary Replicas)** +- Secondary replicas update first – Non-leader pods are upgraded to minimize disruption. +- Primary updates last – Only after all secondaries are healthy does the primary pod restart. +- Cluster state progresses from Updating → Running once all replicas are stable. + +**Role-Unaware Replicas (Ordinal-Based Scaling)** +If replicas have no defined roles, updates follow Kubernetes pod ordinal order: +- Highest ordinal first (e.g., pod-2 → pod-1 → pod-0) to ensure deterministic rollouts. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Milvus Cluster + +Please refer to [Deploying a Milvus Cluster with KubeBlocks](../03-topologies/02-cluster) to deploy a milvus cluster. + +## Vertical Scale + +**Expected Workflow**: + +1. Pods are updated in pod ordinal order, from highest to lowest, (e.g., pod-2 → pod-1 → pod-0) +1. Cluster status transitions from `Updating` to `Running` + +**Check Components** + +There are five components in Milvus Cluster. 
To get the list of components, +```bash +kubectl get cluster -n demo milvus-cluster -oyaml | yq '.spec.componentSpecs[].name' +``` + +Expected Output: +```text +proxy +mixcoord +datanode +indexnode +querynode +``` + + + + Option 1: Using VerticalScaling OpsRequest + + Apply the following YAML to scale up the resources for the **querynode** component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-cluster-vscale-ops + namespace: demo + spec: + clusterName: milvus-cluster + type: VerticalScaling + verticalScaling: + - componentName: querynode + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + + You can check the progress of the scaling operation with the following command: + + ```bash + kubectl -n demo get ops milvus-cluster-vscale-ops -w + ``` + + Expected Result: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + milvus-cluster-vscale-ops VerticalScaling milvus-cluster Running 0/2 33s + milvus-cluster-vscale-ops VerticalScaling milvus-cluster Running 1/2 55s + milvus-cluster-vscale-ops VerticalScaling milvus-cluster Running 2/2 88s + ``` + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update `spec.componentSpecs.resources` field to the desired resources for vertical scale. + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: querynode + replicas: 1 + resources: + requests: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + limits: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + ... + ``` + + + + +:::note + +Milvus Cluster consists of five components. This tutorial shows how to perform changes to one component. +You may perform changes to other components in the same way. 
+ +::: + +## Best Practices & Considerations + +**Planning:** +- Scale during maintenance windows or low-traffic periods +- Verify Kubernetes cluster has sufficient resources +- Check for any ongoing operations before starting + +**Execution:** +- Maintain balanced CPU-to-Memory ratios +- Set identical requests/limits for guaranteed QoS + +**Post-Scaling:** +- Monitor resource utilization and application performance +- Consider adjusting Milvus parameters if needed + +## Verification +Verify the updated resources by inspecting the cluster configuration or Pod details: +```bash +kbcli cluster describe milvus-cluster -n demo +``` + +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +milvus 1 / 1 1Gi / 1Gi data:20Gi +``` + +## Key Benefits of Vertical Scaling with KubeBlocks +- Seamless Scaling: Pods are recreated in a specific order to ensure minimal disruption. +- Dynamic Resource Adjustments: Easily scale CPU and memory based on workload requirements. +- Flexibility: Choose between OpsRequest for dynamic scaling or direct API updates for precise control. +- Improved Availability: The cluster remains operational during the scaling process, maintaining high availability. + +## Cleanup +To remove all created resources, delete the Milvus Cluster along with its namespace: +```bash +kubectl delete cluster milvus-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +1. Deploy a Milvus Cluster managed by KubeBlocks. +2. Perform vertical scaling by increasing or decreasing resources for the milvus component. +3. Use both OpsRequest and direct Cluster API updates to adjust resource allocations. + +Vertical scaling is a powerful tool for optimizing resource utilization and adapting to changing workload demands, ensuring your Milvus Cluster remains performant and resilient. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/03-horizontal-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..3d3f7e16 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,240 @@ +--- +title: Horizontal Scaling of Milvus Clusters with KubeBlocks +description: Learn how to perform horizontal scaling (scale-out and scale-in) on a Milvus cluster managed by KubeBlocks using OpsRequest and direct Cluster API updates. +keywords: [KubeBlocks, Milvus, Horizontal Scaling, Scale-Out, Scale-In, Kubernetes] +sidebar_position: 3 +sidebar_label: Horizontal Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Horizontal Scaling for Milvus Clusters with KubeBlocks + +This guide explains how to perform horizontal scaling (scale-out and scale-in) on a Milvus cluster managed by KubeBlocks. You'll learn how to use both **OpsRequest** and direct **Cluster API** updates to achieve this. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Milvus Cluster + +Please refer to [Deploying a Milvus Cluster with KubeBlocks](../03-topologies/02-cluster) to deploy a milvus cluster. + +## Scale-out (Add Replicas) + +**Expected Workflow**: + +1. New pod is provisioned, and transitions from `Pending` to `Running`. +2. 
Cluster status changes from `Updating` to `Running` + + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale out the Milvus cluster by adding 1 replica to milvus component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-cluster-scale-out-ops + namespace: demo + spec: + clusterName: milvus-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: querynode + # Specifies the replica changes for scaling in components + scaleOut: + # Specifies the replica changes for the component. + # add one more replica to current component + replicaChanges: 1 + ``` + + Monitor the progress of the scaling operation: + + ```bash + kubectl get ops milvus-cluster-scale-out-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + milvus-cluster-scale-out-ops HorizontalScaling milvus-cluster Running 0/1 9s + milvus-cluster-scale-out-ops HorizontalScaling milvus-cluster Running 1/1 16s + milvus-cluster-scale-out-ops HorizontalScaling milvus-cluster Succeed 1/1 16s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: querynode + replicas: 3 # increase replicas from 2 to 3 by 1 + ... + ``` + + Or you can patch the cluster CR with command: + +```bash +kubectl patch cluster milvus-cluster -n demo --type='json' -p='[ + { + "op": "replace", + "path": "/spec/componentSpecs/4/replicas", + "value": 3 + } +]' +``` + + + +### Verify Scale-Out + +After applying the operation, you will see a new pod created and the Milvus cluster status goes from `Updating` to `Running`, and the newly created pod has a new role `secondary`. 
+ +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=milvus-cluster,apps.kubeblocks.io/component-name=querynode +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +milvus-cluster-querynode-0 1/1 Running 0 85m +milvus-cluster-querynode-1 1/1 Running 0 87m +milvus-cluster-querynode-2 1/1 Running 0 99m +``` + +## Scale-in (Remove Replicas) + +**Expected Workflow**: + +1. Selected replica (the one with the largest ordinal) is removed +3. Pod is terminated gracefully +4. Cluster status changes from `Updating` to `Running` + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale in the Milvus cluster by removing ONE replica: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-cluster-scale-in-ops + namespace: demo + spec: + clusterName: milvus-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: querynode + # Specifies the replica changes for scaling in components + scaleIn: + # Specifies the replica changes for the component. 
+ # remove one replica from current component + replicaChanges: 1 + ``` + + Monitor progress: + ```bash + kubectl get ops milvus-cluster-scale-in-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + milvus-cluster-scale-in-ops HorizontalScaling milvus-cluster Running 0/1 8s + milvus-cluster-scale-in-ops HorizontalScaling milvus-cluster Running 1/1 24s + milvus-cluster-scale-in-ops HorizontalScaling milvus-cluster Succeed 1/1 24s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: milvus + replicas: 2 # decrease replicas from 3 to 2 by 1 + ``` + +Or you can patch the cluster CR with command: + +```bash +kubectl patch cluster milvus-cluster -n demo --type='json' -p='[ +{ + "op": "replace", + "path": "/spec/componentSpecs/4/replicas", + "value": 2 + } +]' +``` + + + + + +### Verify Scale-In + +Example Output (Two Pod): +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=milvus-cluster +NAME READY STATUS RESTARTS AGE +milvus-cluster-querynode-0 1/1 Running 0 101m +milvus-cluster-querynode-1 1/1 Running 0 102m +``` + +:::note + +Milvus Cluster consists of five components. This tutorial shows how to perform changes to one component. +You may perform changes to other components in the same way. 
 + +::: + +## Best Practices + +When performing horizontal scaling: +- Scale during low-traffic periods when possible +- Monitor cluster health during scaling operations +- Verify sufficient resources exist before scaling out +- Consider storage requirements for new replicas + +## Cleanup +To remove all created resources, delete the Milvus cluster along with its namespace: +```bash +kubectl delete cluster milvus-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide you learned how to: +- Perform scale-out operations to add replicas to a Milvus cluster. +- Perform scale-in operations to remove replicas from a Milvus cluster. +- Use both OpsRequest and direct Cluster API updates for horizontal scaling. + +KubeBlocks ensures seamless scaling with minimal disruption to your database operations. diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/05-manage-loadbalancer.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..6cb8a41d --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,295 @@ +--- +title: Create and Destroy Milvus Service Using the Declarative Cluster API in KubeBlocks +description: Learn how to configure and manage Milvus services in KubeBlocks for external and internal access using LoadBalancer and other service types. +keywords: [KubeBlocks, Milvus, LoadBalancer, External Service, Expose, Kubernetes] +sidebar_position: 5 +sidebar_label: Manage Milvus Services +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Manage Milvus Services Using the Declarative Cluster API in KubeBlocks + +This guide provides step-by-step instructions for exposing Milvus services managed by KubeBlocks, both externally and internally.
You'll learn to configure external access using cloud provider LoadBalancer services, manage internal services, and properly disable external exposure when no longer needed. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + +## Deploy a Milvus Cluster + +Please refer to [Deploying a Milvus Cluster with KubeBlocks](../03-topologies/02-cluster) to deploy a milvus cluster. + +## View Network Services +List the Services created for the Milvus cluster: +```bash +kubectl get service -l app.kubernetes.io/instance=milvus-cluster -n demo +``` + +Example Services: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +milvus-cluster-proxy ClusterIP 10.96.157.187 19530/TCP,9091/TCP 133m +``` + +## Expose Milvus Service + +External service addresses enable public internet access to Milvus, while internal service addresses restrict access to the user's VPC. + +### Service Types Comparison + +| Type | Use Case | Cloud Cost | Security | +|------|----------|------------|----------| +| ClusterIP | Internal service communication | Free | Highest | +| NodePort | Development/testing | Low | Moderate | +| LoadBalancer | Production external access | High | Managed via security groups | + + + + + + + Option 1: Using OpsRequest + + To expose the Milvus service externally using a LoadBalancer, create an OpsRequest resource: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-cluster-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: milvus-cluster + expose: + - componentName: milvus + services: + - name: internet + # Determines how the Service is exposed. Defaults to 'ClusterIP'. + # Valid options are 'ClusterIP', 'NodePort', and 'LoadBalancer'. + serviceType: LoadBalancer + ports: + - name: milvus + port: 19530 + protocol: TCP + targetPort: milvus + # Contains cloud provider related parameters if ServiceType is LoadBalancer. 
+ # Following is an example for AWS EKS + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + switch: Enable + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops milvus-cluster-expose-enable-ops -n demo + ``` + + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + milvus-cluster-expose-enable-ops Expose milvus-cluster Succeed 1/1 31s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the `spec.services` section in the Cluster resource to include a LoadBalancer service: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: milvus-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: milvus + # expose a external service + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + componentSelector: milvus + name: milvus-internet + serviceName: milvus-internet + spec: + ipFamilyPolicy: PreferDualStack + ports: + - name: milvus + port: 19530 + protocol: TCP + targetPort: milvus + type: LoadBalancer # [ClusterIP, NodePort, LoadBalancer] + componentSpecs: + ... + ``` + The YAML configuration above adds a new external service under the services section. This LoadBalancer service includes annotations for AWS Network Load Balancer (NLB). + + :::note + Cloud Provider Annotations + + When using a LoadBalancer service, you must include the appropriate annotations specific to your cloud provider. 
Below is a list of commonly used annotations for different cloud providers: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # Restricts the LoadBalancer to internal VPC access only. Defaults to internet-facing if not specified. + cloud.google.com/l4-rbs: "enabled" # Optimization for internet-facing LoadBalancer + ``` + + - Alibaba Cloud + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # Use "intranet" for internal-facing LoadBalancer + ``` + ::: + + + :::note + The `service.beta.kubernetes.io/aws-load-balancer-internal` annotation controls whether the LoadBalancer is internal or internet-facing. Note that this annotation cannot be modified dynamically after service creation. + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # Use "true" for internal VPC IPs + ``` + If you change this annotation from "false" to "true" after the Service is created, the annotation may update in the Service object, but the LoadBalancer will still retain its public IP. + + To properly modify this behavior: + - First, delete the existing LoadBalancer service. + - Recreate the service with the updated annotation (`service.beta.kubernetes.io/aws-load-balancer-internal`: "true"). + - Wait for the new LoadBalancer to be provisioned with the correct internal or external IP. 
+ ::: + + + Wait for the Cluster status to transition to Running using the following command: + ```bash + kubectl get cluster milvus-cluster -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + milvus-cluster milvus Delete Running 18m + ``` + + + + +### Verify the Exposed Service +Check the service details to confirm the LoadBalancer service is created: + +```bash +kubectl get service -l app.kubernetes.io/instance=milvus-cluster -n demo +``` + +Example Output: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +milvus-cluster-milvus-internet LoadBalancer 172.20.60.24 19530:31243/TCP 1m +``` + +## Disable External Exposure + + + + + + Option 1: Using OpsRequest + + To disable external access, create an OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-cluster-expose-disable-ops + namespace: demo + spec: + clusterName: milvus-cluster + expose: + - componentName: milvus + services: + - name: internet + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops milvus-cluster-expose-disable-ops -n demo + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + milvus-cluster-expose-disable-ops Expose milvus-cluster Succeed 1/1 24s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, remove the `spec.services` field from the Cluster resource: + ```bash + kubectl patch cluster milvus-cluster -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + Monitor the cluster status until it is Running: + ```bash + kubectl get cluster milvus-cluster -n demo -w + ``` + + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + milvus-cluster milvus Delete Running 44m + ``` + + + +### Verify Service Removal + +Ensure that the 'milvus-cluster-milvus-internet' Service is removed: + +```bash +kubectl get 
service -l app.kubernetes.io/instance=milvus-cluster -n demo +``` + +Expected Result: The 'milvus-cluster-milvus-internet' Service should be removed. + +## Cleanup +To remove all created resources, delete the Milvus cluster along with its namespace: +```bash +kubectl delete cluster milvus-cluster -n demo +kubectl delete ns demo +``` + +## Summary +This guide demonstrated how to: +- Expose a Milvus service externally or internally using KubeBlocks. +- Configure LoadBalancer services with cloud provider-specific annotations. +- Manage external access by enabling or disabling services via OpsRequest or direct updates to the Cluster API. + +KubeBlocks provides flexibility and simplicity for managing Milvus services in Kubernetes environments. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/09-decommission-a-specific-replica.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..04313766 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,143 @@ +--- +title: Decommission a Specific Pod in KubeBlocks-Managed Milvus Clusters +description: Learn how to decommission (take offline) a specific Pod in a Milvus cluster managed by KubeBlocks. +keywords: [KubeBlocks, Milvus, Decommission Pod, Horizontal Scaling, Kubernetes] +sidebar_position: 9 +sidebar_label: Decommission Milvus Replica +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + +# Decommission a Specific Pod in KubeBlocks-Managed Milvus Clusters + +This guide explains how to decommission (take offline) specific Pods in Milvus clusters managed by KubeBlocks. Decommissioning provides precise control over cluster resources while maintaining availability.
Use this for workload rebalancing, node maintenance, or addressing failures. + +## Why Decommission Pods with KubeBlocks? + +In traditional StatefulSet-based deployments, Kubernetes lacks the ability to decommission specific Pods. StatefulSets ensure the order and identity of Pods, and scaling down always removes the Pod with the highest ordinal number (e.g., scaling down from 3 replicas removes `Pod-2` first). This limitation prevents precise control over which Pod to take offline, which can complicate maintenance, workload distribution, or failure handling. + +KubeBlocks overcomes this limitation by enabling administrators to decommission specific Pods directly. This fine-grained control ensures high availability and allows better resource management without disrupting the entire cluster. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + +<Prerequisites /> + +## Deploy a Milvus Cluster + +Please refer to [Deploying a Milvus Cluster with KubeBlocks](../03-topologies/02-cluster) to deploy a milvus cluster. + +## Decommission a Pod + +**Expected Workflow**: + +1. Replica specified in `onlineInstancesToOffline` is removed +2. Pod terminates gracefully +3. Cluster transitions from `Updating` to `Running` + + +Before decommissioning a specific pod from a component, make sure this component has more than one replica. +If not, please scale out the component first. + +For example, you can patch the cluster CR with the following command to declare there are 3 replicas in component querynode.
+ +```bash +kubectl patch cluster milvus-cluster -n demo --type='json' -p='[ + { + "op": "replace", + "path": "/spec/componentSpecs/4/replicas", + "value": 3 + } +]' +``` + + +To decommission a specific Pod (e.g., 'milvus-cluster-querynode-1'), you can use one of the following methods: + + + + + + Option 1: Using OpsRequest + + Create an OpsRequest to mark the Pod as offline: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-cluster-decommission-ops + namespace: demo + spec: + clusterName: milvus-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: querynode + scaleIn: + onlineInstancesToOffline: + - 'milvus-cluster-querynode-1' # Specifies the instance names that need to be taken offline + ``` + + #### Monitor the Decommissioning Process + Check the progress of the decommissioning operation: + + ```bash + kubectl get ops milvus-cluster-decommission-ops -n demo -w + ``` + Example Output: + + ```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +milvus-cluster-decommission-ops HorizontalScaling milvus-cluster Running 0/1 8s +milvus-cluster-decommission-ops HorizontalScaling milvus-cluster Running 1/1 31s +milvus-cluster-decommission-ops HorizontalScaling milvus-cluster Succeed 1/1 31s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the Cluster resource directly to decommission the Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: milvus + replicas: 2 # explected replicas after decommission + offlineInstances: + - milvus-cluster-querynode-1 # <----- Specify Pod to be decommissioned + ... 
+ ``` + + + + +### Verify the Decommissioning + +After applying the updated configuration, verify the remaining Pods in the cluster: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=milvus-cluster +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +milvus-cluster-querynode-0 2/2 Running 0 25m +milvus-cluster-querynode-2 2/2 Running 0 24m +``` + +## Summary +Key takeaways: +- Traditional StatefulSets lack precise Pod removal control +- KubeBlocks enables targeted Pod decommissioning +- Two implementation methods: OpsRequest or Cluster API + +This provides granular cluster management while maintaining availability. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/_category_.yml new file mode 100644 index 00000000..5c4d52e0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/04-operations/_category_.yml @@ -0,0 +1,4 @@ +position: 4 +label: Operations +collapsible: true +collapsed: false \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..e5347f98 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,227 @@ +--- +title: Observability for Milvus Clusters with the Prometheus Operator +description: Learn how to set up observability for Milvus Clusters in KubeBlocks using the Prometheus Operator. Configure monitoring and visualize metrics with Grafana. 
+keywords: [KubeBlocks, Milvus, Prometheus, Grafana, Observability, Metrics] +sidebar_position: 2 +sidebar_label: Observability for Milvus Clusters +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Milvus Monitoring with Prometheus Operator + +This guide demonstrates how to configure comprehensive monitoring for Milvus clusters in KubeBlocks using: + +1. Prometheus Operator for metrics collection +2. Built-in Milvus exporter for metrics exposure +3. Grafana for visualization + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Install Monitoring Stack + +### 1. Install Prometheus Operator +Deploy the kube-prometheus-stack using Helm: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. Verify Installation +Check all components are running: +```bash +kubectl get pods -n monitoring +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + + +## Deploy a Milvus Cluster + +Please refer to [Deploying a Milvus Cluster with KubeBlocks](../03-topologies/02-cluster) to deploy a milvus cluster. + +## Configure Metrics Collection + +### 1. 
Verify Exporter Endpoint + +```bash +kubectl -n demo exec -it pods/milvus-cluster-proxy-0 -- \ + curl -s http://127.0.0.1:9091/metrics | head -n 50 +``` + +Perform the verification against all Milvus replicas, including: +- milvus-cluster-datanode +- milvus-cluster-indexnode +- milvus-cluster-mixcoord +- milvus-cluster-proxy +- milvus-cluster-querynode + +### 2. Create PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: milvus-cluster-pod-monitor + namespace: demo + labels: # Must match the setting in 'prometheus.spec.podMonitorSelector' + release: prometheus +spec: + podMetricsEndpoints: + - path: /metrics + port: metrics + scheme: http + relabelings: + - targetLabel: app_kubernetes_io_name + replacement: milvus + namespaceSelector: + matchNames: + - demo # Target namespace + selector: + matchLabels: + app.kubernetes.io/instance: milvus-cluster +``` +**PodMonitor Configuration Guide** + +| Parameter | Required | Description | +|-----------|----------|-------------| +| `port` | Yes | Must match exporter port name ('http-metrics') | +| `namespaceSelector` | Yes | Targets namespace where Milvus runs | +| `labels` | Yes | Must match Prometheus's podMonitorSelector | +| `path` | No | Metrics endpoint path (default: /metrics) | +| `interval` | No | Scraping interval (default: 30s) | + + +It sets up a `PodMonitor` to monitor the Milvus cluster and scrapes the metrics from the Milvus components. + +```yaml + podMetricsEndpoints: + - path: /metrics + port: metrics + scheme: http + relabelings: + - targetLabel: app_kubernetes_io_name + replacement: milvus # add a label to the target: app_kubernetes_io_name=milvus +``` + +## Verify Monitoring Setup + +### 1. 
Check Prometheus Targets +Forward and access Prometheus UI: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +Open your browser and navigate to: +http://localhost:9090/targets + +Check if there is a scrape job corresponding to the PodMonitor (the job name is 'demo/milvus-cluster-pod-monitor'). + +Expected State: +- The State of the target should be UP. +- The target's labels should include the ones defined in podTargetLabels (e.g., 'app_kubernetes_io_instance'). + +### 2. Test Metrics Collection +Verify metrics are being scraped: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=milvus_num_node{app_kubernetes_io_name="milvus"}' | jq +``` + +Example Output: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "milvus_num_node", + "app_kubernetes_io_name": "milvus", + "container": "indexnode", + "endpoint": "metrics", + "instance": "10.244.0.149:9091", + "job": "demo/milvus-cluster-pod-monitor", + "namespace": "demo", + "node_id": "23", + "pod": "milvus-cluster-indexnode-0", + "role_name": "indexnode" + }, + "value": [ + 1747637044.313, + "1" + ] + }, + { + "metric": { + "__name__": "milvus_num_node", + "app_kubernetes_io_name": "milvus", + "container": "querynode", + "endpoint": "metrics", + "instance": "10.244.0.153:9091", + "job": "demo/milvus-cluster-pod-monitor", + "namespace": "demo", + "node_id": "27", + "pod": "milvus-cluster-querynode-1", + "role_name": "querynode" + }, + "value": [ + 1747637044.313, + "1" + ] + }, + ... // more output ommitted. +``` +## Visualize in Grafana + +### 1. Access Grafana +Port-forward and login: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +Open your browser and navigate to http://localhost:3000. Use the default credentials to log in: +- Username: 'admin' +- Password: 'prom-operator' (default) + +### 2. 
Import Dashboard
+Import the KubeBlocks Milvus dashboard:
+
+1. In Grafana, navigate to "+" → "Import"
+2. Import dashboard from: [Milvus Dashboard](https://raw.githubusercontent.com/milvus-io/milvus/refs/heads/master/deployments/monitor/grafana/milvus-dashboard.json)
+   For more details, please refer to the [Milvus website](https://milvus.io/)
+
+![milvus-monitoring-grafana-dashboard.png](/img/docs/en/milvus-monitoring-grafana-dashboard.png)
+
+
+## Delete
+To delete all the created resources, run the following commands:
+```bash
+kubectl delete cluster milvus-cluster -n demo
+kubectl delete ns demo
+kubectl delete podmonitor milvus-cluster-pod-monitor -n demo
+```
+
+## Summary
+In this tutorial, we set up observability for a Milvus cluster in KubeBlocks using the Prometheus Operator.
+By configuring a `PodMonitor`, we enabled Prometheus to scrape metrics from the Milvus exporter.
+Finally, we visualized these metrics in Grafana. This setup provides valuable insights for monitoring the health and performance of your Milvus databases.
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/08-monitoring/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-milvus/08-monitoring/_category_.yml new file mode 100644 index 00000000..6953a6d9 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +position: 8 +label: Monitoring +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-milvus/_category_.yml new file mode 100644 index 00000000..719469ae --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/_category_.yml @@ -0,0 +1,4 @@ +position: 12 +label: KubeBlocks for Milvus Community Edition +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_category_.yml new file mode 100644 index 00000000..753acb79 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_category_.yml @@ -0,0 +1,5 @@ +position: 100 +label: tpl +collapsible: true +collapsed: false +hidden: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_create-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_create-cluster.mdx new file mode 100644 index 00000000..be9afea3 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_create-cluster.mdx @@ -0,0 +1,36 @@ +KubeBlocks uses a declarative approach for managing Milvus Clusters. +Below is an example configuration for deploying a Milvus Cluster with 3 replicas. 
+ +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: milvus-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: milvus + topology: clustermode + componentSpecs: + - name: milvus + serviceVersion: 3.13.7 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_prerequisites.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..e632dc41 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. 
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_verify-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_verify-cluster.mdx new file mode 100644 index 00000000..0b4194c2 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-milvus/_tpl/_verify-cluster.mdx @@ -0,0 +1,48 @@ +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster milvus-cluster -n demo -w +``` + +Expected Output: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +milvus-standalone milvus Delete Creating 40s +milvus-standalone milvus Delete Creating 71s +milvus-standalone milvus Delete Creating 71s +milvus-standalone milvus Delete Updating 71s +milvus-standalone milvus Delete Running 2m55s +``` + +Check the component and pod status: + +```bash +kubectl get component -n demo -l app.kubernetes.io/instance=milvus-standalone +``` +Expected Output: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +milvus-standalone-etcd etcd-3-1.0.0 3.5.15 Running 3m5s +milvus-standalone-milvus milvus-standalone-1.0.0 v2.3.2 Running 114s +milvus-standalone-minio milvus-minio-1.0.0 8.0.17 Running 3m5s +``` + + +```bash +kubectl get pods -l app.kubernetes.io/instance=milvus-standalone -n demo +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +milvus-standalone-etcd-0 2/2 Running 0 4m31s +milvus-standalone-milvus-0 1/1 Running 0 3m20s +milvus-standalone-minio-0 1/1 Running 0 4m31s +``` + +Once the cluster status becomes Running, your Milvus cluster is ready for use. + +:::tip +If you are creating the cluster for the very first time, it may take some time to pull images before running. 
+ +::: diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/01-overview.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/01-overview.mdx new file mode 100644 index 00000000..4285a917 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/01-overview.mdx @@ -0,0 +1,73 @@ +--- +title: Overview of KubeBlocks MongoDB Addon +description: Learn about the features and capabilities of the KubeBlocks MongoDB addon, including deployment topologies, lifecycle management, backup and restore, and supported versions. +keywords: [MongoDB, KubeBlocks, operator, database, features, lifecycle management, backup, restore] +sidebar_position: 1 +sidebar_label: Overview +--- + +# Overview of KubeBlocks MongoDB Addon + +MongoDB is a NoSQL document database designed for flexibility, scalability, and high performance. Unlike traditional relational databases (SQL), MongoDB stores data in JSON-like documents (BSON format), making it ideal for unstructured or semi-structured data. + +### Supported Topologies + +**replicaset** + +A MongoDB replica set is a group of MongoDB servers that maintain the same dataset, providing high availability and data redundancy. Replica sets are the foundation of MongoDB's fault tolerance and data reliability. By replicating data across multiple nodes, MongoDB ensures that if one server fails, another can take over seamlessly without affecting the application's availability. + +In a replica set, there are typically three types of nodes: + +- **Primary Node**: Handles all write operations and serves read requests by default. +- **Secondary Nodes**: Maintain copies of the primary's data and can optionally serve read requests. +- **Arbiter Node**: Participates in elections but does not store data. It is used to maintain an odd number of voting members in the replica set. + +And it is recommended to create a cluster with at least **three** nodes to ensure high availability, one primary and two secondary nodes. 
+ +### Lifecycle Management + +KubeBlocks simplifies MongoDB operations with comprehensive lifecycle management: + +| Feature | Description | +|------------------------------|-----------------------------------------------------------------------------| +| **Horizontal Scaling** | Scale replicas in/out to adjust capacity | +| **Vertical Scaling** | Adjust CPU/memory resources for MongoDB instances | +| **Volume Expansion** | Dynamically increase storage capacity without downtime | +| **Restart Operations** | Controlled cluster restarts with minimal disruption | +| **Start/Stop** | Temporarily suspend/resume cluster operations | +| **Password Management** | Ability to set and manage custom root password for the MongoDB cluster during creation | +| **Custom Services** | Expose specialized database endpoints | +| **Switchover** | Planned primary-replica role changes | +| **Replica Management** | Safely decommission or rebuild specific replicas | +| **Version Upgrades** | Perform minor version upgrades seamlessly | +| **Advanced Scheduling** | Customize pod placement and resource allocation | +| **Monitoring** | Integrated Prometheus metrics collection | +| **Logging** | Centralized logs via Loki Stack | + + +### Backup and Restore + +KubeBlocks supports multiple backup strategies for MongoDB: + +| Feature | Method | Description | +|-------------|--------|------------| +| Full Backup | dump | Uses `mongodump`, a MongoDB utility used to create a binary export of the contents of a database | +| Full Backup | datafile | Backup the data files of the database | +| Continuous Backup | archive-oplog | Continuously archives MongoDB oplog using `wal-g` | + +### Supported Versions + +KubeBlocks MongoDB Addon supports these MongoDB versions: + +| Major Version | Supported Minor Versions | +|---------------|--------------------------------| +| 4.0 | 4.0.28,4.2.24,4.4.29 | +| 5.0 | 5.0.28 | +| 6.0 | 6.0.22,6.0.20,6.0.16 | +| 7.0 | 7.0.19,7.0.16,7.0.12 | +| 8.0 | 8.0.8,8.0.6,8.0.4| 
+ +The list of supported versions can be found by following command: +```bash +kubectl get cmpv mongodb +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/02-quickstart.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/02-quickstart.mdx new file mode 100644 index 00000000..d5328a01 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/02-quickstart.mdx @@ -0,0 +1,544 @@ +--- +title: MongoDB Quickstart +description: Comprehensive guide to deploying and managing MongoDB ReplicaSet Clusters with KubeBlocks, including installation, configuration, and operational best practices, an alternative to dedicated operator. +keywords: [Kubernetes Operator, MongoDB, KubeBlocks, Helm, Cluster Management, QuickStart] +sidebar_position: 2 +sidebar_label: Quickstart +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# MongoDB Quickstart + +This guide provides a comprehensive walkabout for deploying and managing MongoDB ReplicaSet Clusters using the **KubeBlocks MongoDB Add-on**, covering: +- System prerequisites and add-on installation +- Cluster creation and configuration +- Operational management including start/stop procedures +- Connection methods and cluster monitoring + +## Prerequisites + +### System Requirements + +Before proceeding, verify your environment meets these requirements: + +- A functional Kubernetes cluster (v1.21+ recommended) +- `kubectl` v1.21+ installed and configured with cluster access +- Helm installed ([installation guide](https://helm.sh/docs/intro/install/)) +- KubeBlocks installed ([installation guide](../user_docs/overview/install-kubeblocks)) + +### Verify MongoDB Add-on + +The MongoDB Add-on is included with KubeBlocks by default. Check its status: + +```bash +helm list -n kb-system | grep mongodb +``` + +
+Example Output: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-mongodb kb-system 1 2025-05-21 deployed mongodb-1.0.0 +``` +
+ +If the add-on isn't enabled, choose an installation method: + + + + + ```bash + # Add Helm repo + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # For users in Mainland China, if GitHub is inaccessible or slow, use this alternative repo: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # Update helm repo + helm repo update + # Search available Add-on versions + helm search repo kubeblocks/mongodb --versions + # Install your desired version (replace with your chosen version) + helm upgrade -i kb-addon-mongodb kubeblocks-addons/mongodb --version -n kb-system + ``` + + + + + ```bash + # Add an index (kubeblocks is added by default) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # Update the index + kbcli addon index update kubeblocks + # Update all indexes + kbcli addon index update --all + ``` + + To search and install an addon: + + ```bash + # Search Add-on + kbcli addon search mongodb + # Install Add-on with your desired version (replace with your chosen version) + kbcli addon install mongodb --version + ``` + **Example Output:** + ```bash + ADDON VERSION INDEX + mongodb 0.9.0 kubeblocks + mongodb 0.9.1 kubeblocks + mongodb 1.0.0 kubeblocks + ``` + To enable or disable an addon: + + ```bash + # Enable Add-on + kbcli addon enable mongodb + # Disable Add-on + kbcli addon disable mongodb + ``` + + + + +:::note +**Version Compatibility** + +Always verify that the MongoDB Add-on version matches your KubeBlocks major version to avoid compatibility issues. + +::: + +### Verify Supported MongoDB Versions + +**List available MongoDB versions:** + +```bash +kubectl get cmpv mongodb +``` +
+Example Output +```text +NAME VERSIONS STATUS AGE +mongodb 8.0.8,8.0.6,8.0.4,7.0.19,7.0.16,7.0.12,6.0.22,6.0.20,6.0.16,5.0.30,5.0.28,4.4.29,4.2.24,4.0.28 Available 26d +``` +
+ +**Check version compatibility for ComponentDefinitions** + +**Step 1.** Get the list of `ComponentDefinition` associated with a given `ComponentVersion` + +```bash +kubectl get cmpv mongodb -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n' +``` + +
+Example Output +```text +mongodb-1.0.0 +``` +
+ +**Step 2.** Get the list of `ComponentDefinition` associated with a given `ComponentVersion` + +```bash +kubectl get cmpv mongodb -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("mongodb"))) | .releases[]' +``` + +This returns versions compatible with `ComponentDefinition` named `mongodb`: + +
+Example Output +```text +8.0.8 +8.0.6 +8.0.4 +7.0.19 +7.0.16 +7.0.12 +6.0.22 +6.0.20 +6.0.16 +5.0.30 +5.0.28 +4.4.29 +4.2.24 +4.0.28 +``` +
+ +### Storage Configuration + +MongoDB requires persistent storage. Verify available options: + +```bash +kubectl get storageclass +``` + +Recommended storage characteristics: +- Minimum 20Gi capacity +- ReadWriteOnce access mode +- Supports volume expansion +- Appropriate performance for workload + +## Deploy a MongoDB Replication Cluster + +Deploy a basic MongoDB ReplicaSet Cluster with default settings: + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/mongodb/cluster.yaml +``` + +This creates: +- A MongoDB ReplicaSet Cluster with two components, MongoDB(2 replicas) and MongoDB Sentinel(3 replicas). +- Default resource allocations (0.5 CPU, 0.5Gi memory) +- 20Gi persistent storage +- Automatic primary-replica configuration + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mongo-cluster + namespace: demo +spec: + # Specifies the behavior when a Cluster is deleted. + # Valid options are: [DoNotTerminate, Delete, WipeOut] (`Halt` is deprecated since KB 0.9) + # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. + # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. + # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. + terminationPolicy: Delete + # Specifies the name of the ClusterDefinition to use when creating a Cluster. + # Note: DO NOT UPDATE THIS FIELD + # The value must be `mongodb` to create a MongoDB Cluster + clusterDef: mongodb + # Specifies the name of the ClusterTopology to be used when creating the + # Cluster. 
+ # Valid options are [replicaset] + topology: replicaset + # Specifies a list of ClusterComponentSpec objects used to define the + # individual Components that make up a Cluster. + # This field allows for detailed configuration of each Component within the Cluster + componentSpecs: + - name: mongodb + # ServiceVersion specifies the version of the Service expected to be + # provisioned by this Component. + # Valid options are: [4.0.28,4.2.24,4.4.29,5.0.28,6.0.16,7.0.1] + serviceVersion: "6.0.16" + # Specifies the desired number of replicas in the Component + replicas: 3 + # Specifies the resources required by the Component. + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + # Specifies a list of PersistentVolumeClaim templates that define the storage + # requirements for the Component. + volumeClaimTemplates: + # Refers to the name of a volumeMount defined in + # `componentDefinition.spec.runtime.containers[*].volumeMounts + - name: data + spec: + # The name of the StorageClass required by the claim. + # If not specified, the StorageClass annotated with + # `storageclass.kubernetes.io/is-default-class=true` will be used by default + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # Set the storage size as needed + storage: 20Gi +``` + +For more API fields and descriptions, refer to the [API Reference](../user_docs/references/api-reference/cluster). 
+ +### Create a Version-Specific MongoDB ReplicaSet Cluster + +To create a cluster with a specific version, configure `spec.componentSpecs.serviceVersion` (major.minor version) fields before applying it: + + + + ```yaml + componentSpecs: + - name: mongodb + serviceVersion: 4.0.28 # Valid options: [4.0.28,4.2.24,4.4.29] + ``` + + + ```yaml + componentSpecs: + - name: mongodb + serviceVersion: 5.0.28 # Valid options: [5.0.28] + ``` + + + ```yaml + componentSpecs: + - name: mongodb + serviceVersion: 6.0.22 # Valid options: [6.0.22,6.0.20,6.0.16 ] + ``` + + + ```yaml + componentSpecs: + - name: mongodb + serviceVersion: 7.0.19 # Valid options: [7.0.19,7.0.16,7.0.12] + ``` + + + ```yaml + componentSpecs: + - name: mongodb + serviceVersion: 8.0.8 # Valid options: [8.0.8,8.0.6,8.0.4] + ``` + + + + +## Verify Cluster Status + +When deploying a MongoDB ReplicaSet Cluster with 1 primary replica and 2 secondary replicas: + +Confirm successful deployment by checking: + +1. Cluster phase is `Running` +2. All pods are operational +3. 
Replicas have correct roles + +Check status using either method: + + + +```bash +kubectl get cluster mongo-cluster -n demo -w +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +mongo-cluster mongodb Delete Creating 49s +mongo-cluster mongodb Delete Running 62s + +kubectl get pods -l app.kubernetes.io/instance=mongo-cluster -L kubeblocks.io/role -n demo +NAME READY STATUS RESTARTS AGE ROLE +mongo-cluster-mongodb-0 2/2 Running 0 78s primary +mongo-cluster-mongodb-1 2/2 Running 0 63s secondary +mongo-cluster-mongodb-2 2/2 Running 0 48s secondary +``` + + + + + With `kbcli` installed, you can view comprehensive cluster information: + +```bash +kbcli cluster describe mongo-cluster -n demo + +Name: mongo-cluster Created Time: May 18,2025 21:16 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo mongodb replicaset Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL +mongodb mongo-cluster-mongodb.demo.svc.cluster.local:27017 + mongo-cluster-mongodb-mongodb.demo.svc.cluster.local:27017 + mongo-cluster-mongodb-mongodb-ro.demo.svc.cluster.local:27017 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mongodb 6.0.16 mongo-cluster-mongodb-0 primary Running zone-x x.y.z May 18,2025 21:16 UTC+0800 +mongodb 6.0.16 mongo-cluster-mongodb-1 secondary Running zone-x x.y.z May 18,2025 21:16 UTC+0800 +mongodb 6.0.16 mongo-cluster-mongodb-2 secondary Running zone-x x.y.z May 18,2025 21:17 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mongodb 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +mongodb mongodb-1.0.0 docker.io/library/mongo:6.0.16 + apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:1.0.0 + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +Show cluster events: kbcli cluster list-events -n demo 
mongo-cluster +``` + + + + +## Access the MongoDB ReplicaSet Cluster + +KubeBlocks automatically provisions: +1. Credentials stored in Secret `mongo-cluster-mongodb-account-root` +2. ClusterIP Service `mongo-cluster-mongodb-mongodb` for read-write (Primary) replica +3. ClusterIP Service `mongo-cluster-mongodb-mongodb-ro` for read-only (Secondary) replicas + +### Retrieve Credentials +```bash +# Get username +NAME=$(kubectl get secret -n demo mongo-cluster-mongodb-account-root -o jsonpath='{.data.username}' | base64 --decode) +# Get password +PASSWD=$(kubectl get secret -n demo mongo-cluster-mongodb-account-root -o jsonpath='{.data.password}' | base64 --decode) +``` + +### Connection Methods + + + + + Connect directly to a pod: + ```bash + kubectl exec -ti -n demo mongo-cluster-mongodb-0 -- \ + mongosh "mongodb://${NAME}:${PASSWD}@mongo-cluster-mongodb-mongodb:27017/admin" + ``` + + + + + 1. Forward service port: + + ```bash + kubectl port-forward svc/mongo-cluster-mongodb-mongodb 27017:27017 -n demo + ``` + + + 2. Connect via localhost: + ```bash + mongosh "mongodb://${NAME}:${PASSWD}@127.0.0.1:27017/admin" + ``` + + + +:::note +**Production Considerations** + +For production environments, avoid using `kubectl exec` and `port-forward`. 
Instead implement: +- LoadBalancer or NodePort Services for external access +- Network policies to restrict access +- TLS encryption for secure connections +- Connection pooling for better performance +::: + +## Stop the MongoDB ReplicaSet Cluster + +Stopping a cluster temporarily suspends operations while preserving all data and configuration: + +**Key Effects:** +- Compute resources (Pods) are released +- Persistent storage (PVCs) remains intact +- Service definitions are maintained +- Cluster configuration is preserved +- Operational costs are reduced + + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/mongodb/stop.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongodb-stop + namespace: demo + spec: + clusterName: mongo-cluster + type: Stop + ``` + + + + Alternatively, stop by setting `spec.componentSpecs.stop` to true: + +```bash +kubectl patch cluster mongo-cluster -n demo --type='json' -p='[ +{ + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true +} +]' +``` + + ```yaml + spec: + componentSpecs: + - name: mongodb + stop: true # Set to stop component + replicas: 2 + ``` + + + +## Start the MongoDB ReplicaSet Cluster + +Restarting a stopped cluster resumes operations with all data and configuration intact. 
+ +**Key Effects:** +- Compute resources (Pods) are recreated +- Services become available again +- Cluster returns to previous state + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/mongodb/start.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongodb-start + namespace: demo + spec: + clusterName: mongo-cluster + type: Start + ``` + + + + Restart by setting `spec.componentSpecs.stop` to false: + +```bash +kubectl patch cluster mongo-cluster -n demo --type='json' -p='[ +{ + "op": "remove", + "path": "/spec/componentSpecs/0/stop" +} +]' +``` + + + + + +## Delete MongoDB ReplicaSet Cluster + +Choose carefully based on your data retention needs: + +| Policy | Resources Removed | Data Removed | Recommended For | +|-----------------|-------------------|--------------|-----------------| +| DoNotTerminate | None | None | Critical production clusters | +| Delete | All resources | PVCs deleted | Non-critical environments | +| WipeOut | All resources | Everything* | Test environments only | + +*Includes snapshots and backups in external storage + +**Pre-Deletion Checklist:** +1. Verify no applications are using the cluster +2. Ensure required backups exist +3. Confirm proper terminationPolicy is set +4. 
Check for dependent resources + +For test environments, use this complete cleanup: + +```bash +kubectl patch cluster mongo-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster mongo-cluster -n demo +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/01-stop-start-restart.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..7ab136f0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,283 @@ +--- +title: MongoDB ReplicaSet Cluster Lifecycle Management (Stop, Start, Restart) +description: Learn how to manage MongoDB ReplicaSet Cluster states in KubeBlocks including stopping, starting, and restarting operations to optimize resources. +keywords: [KubeBlocks, MongoDB, Cluster Management, Stop, Start, Restart] +sidebar_position: 1 +sidebar_label: Lifecycle Management +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# MongoDB ReplicaSet Cluster Lifecycle Management + +This guide demonstrates how to manage a MongoDB ReplicaSet Cluster's operational state in **KubeBlocks**, including: + +- Stopping the cluster to conserve resources +- Starting a stopped cluster +- Restarting cluster components + +These operations help optimize resource usage and reduce operational costs in Kubernetes environments. 
+ +Lifecycle management operations in KubeBlocks: + +| Operation | Effect | Use Case | +|-----------|--------|----------| +| Stop | Suspends cluster, retains storage | Cost savings, maintenance | +| Start | Resumes cluster operation | Restore service after pause | +| Restart | Recreates pods for component | Configuration changes, troubleshooting | + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a MongoDB ReplicaSet Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Cluster Lifecycle Operations + +### Stopping the Cluster + +Stopping a MongoDB ReplicaSet Cluster in KubeBlocks will: + +1. Terminates all running pods +2. Retains persistent storage (PVCs) +3. Maintains cluster configuration + +This operation is ideal for: +- Temporary cost savings +- Maintenance windows +- Development environment pauses + + + + + +Option 1: OpsRequest API + +Create a Stop operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mongo-cluster-stop-ops + namespace: demo +spec: + clusterName: mongo-cluster + type: Stop +``` + + + + +Option 2: Cluster API Patch + +Modify the cluster spec directly by patching the stop field: + +```bash +kubectl patch cluster mongo-cluster -n demo --type='json' -p='[ +{ + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true +} +]' +``` + + + + + +### Verifying Cluster Stop + +To confirm a successful stop operation: + +1. Check cluster status transition: + ```bash + kubectl get cluster mongo-cluster -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + mongo-cluster mongodb Delete Stopping 6m3s + mongo-cluster mongodb Delete Stopped 6m55s + + ``` + +2. 
Verify no running pods: + ```bash + kubectl get pods -l app.kubernetes.io/instance=mongo-cluster -n demo + ``` + Example Output: + ```bash + No resources found in demo namespace. + ``` + +3. Confirm persistent volumes remain: + ```bash + kubectl get pvc -l app.kubernetes.io/instance=mongo-cluster -n demo + ``` + Example Output: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE + data-mongo-cluster-mongodb-0 Bound pvc-uuid 20Gi RWO 22m + data-mongo-cluster-mongodb-1 Bound pvc-uuid 20Gi RWO 21m + data-mongo-cluster-mongodb-2 Bound pvc-uuid 20Gi RWO 21m + ``` + +### Starting the Cluster + +Starting a stopped MongoDB ReplicaSet Cluster: +1. Recreates all pods +2. Reattaches persistent storage +3. Restores service endpoints + +Expected behavior: +- Cluster returns to previous state +- No data loss occurs +- Services resume automatically + + + + +Initiate a Start operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mongo-cluster-start-ops + namespace: demo +spec: + # Specifies the name of the Cluster resource that this operation is targeting. + clusterName: mongo-cluster + type: Start +``` + + + + + +Modify the cluster spec to resume operation: +1. Set stop: false, or +2. Remove the stop field entirely + +```bash +kubectl patch cluster mongo-cluster -n demo --type='json' -p='[ +{ + "op": "remove", + "path": "/spec/componentSpecs/0/stop" +} +]' +``` + + + + + +### Verifying Cluster Start + +To confirm a successful start operation: + +1. Check cluster status transition: + ```bash + kubectl get cluster mongo-cluster -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + mongo-cluster mongodb Delete Updating 24m + mongo-cluster mongodb Delete Running 24m + mongo-cluster mongodb Delete Running 24m + ``` + +2. 
Verify pod recreation: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster -L kubeblocks.io/role + ``` + Example Output: + ```bash + NAME READY STATUS RESTARTS AGE ROLE + mongo-cluster-mongodb-0 2/2 Running 0 55s primary + mongo-cluster-mongodb-1 2/2 Running 0 44s secondary + mongo-cluster-mongodb-2 2/2 Running 0 33s secondary + ``` + +### Restarting Cluster + +Restart operations provide: +- Pod recreation without full cluster stop +- Component-level granularity +- Minimal service disruption + +Use cases: +- Configuration changes requiring restart +- Resource refresh +- Troubleshooting + +**Using OpsRequest API** + +Target specific components `mongodb` for restart: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mongo-cluster-restart-ops + namespace: demo +spec: + clusterName: mongo-cluster + type: Restart + restart: + - componentName: mongodb +``` + +**Verifying Restart Completion** + +To verify a successful component restart: + +1. Track OpsRequest progress: + ```bash + kubectl get opsrequest mongo-cluster-restart-ops -n demo -w + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + mongo-cluster-restart-ops Restart mongo-cluster Running 0/3 4s + mongo-cluster-restart-ops Restart mongo-cluster Running 1/3 28s + mongo-cluster-restart-ops Restart mongo-cluster Running 2/3 56s + mongo-cluster-restart-ops Restart mongo-cluster Running 2/3 109s + ``` + +2. Check pod status: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster + ``` + Note: Pods will show new creation timestamps after restart + +3. Verify component health: + ```bash + kbcli cluster describe mongo-cluster -n demo + ``` + +Once the operation is complete, the cluster will return to the Running state. + +## Summary +In this guide, you learned how to: +1. Stop a MongoDB ReplicaSet Cluster to suspend operations while retaining persistent storage. +2. 
Start a stopped cluster to bring it back online. +3. Restart specific cluster components to recreate their Pods without stopping the entire cluster. + +By managing the lifecycle of your MongoDB ReplicaSet Cluster, you can optimize resource utilization, reduce costs, and maintain flexibility in your Kubernetes environment. KubeBlocks provides a seamless way to perform these operations, ensuring high availability and minimal disruption. diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/02-vertical-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..c571c618 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,174 @@ +--- +title: Vertical Scaling in a MongoDB ReplicaSet Cluster +description: Learn how to perform vertical scaling in a MongoDB ReplicaSet Cluster managed by KubeBlocks to optimize resource utilization and improve performance. +keywords: [KubeBlocks, MongoDB, Vertical Scaling, Kubernetes, Resources] +sidebar_position: 2 +sidebar_label: Vertical Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Vertical Scaling for MongoDB Replication Clusters with KubeBlocks + +This guide demonstrates how to vertically scale a MongoDB ReplicaSet Cluster managed by KubeBlocks by adjusting compute resources (CPU and memory) while maintaining the same number of replicas. + +Vertical scaling modifies compute resources (CPU and memory) for MongoDB instances while maintaining replica count. Key characteristics: + +- **Non-disruptive**: When properly configured, maintains availability during scaling +- **Granular**: Adjust CPU, memory, or both independently +- **Reversible**: Scale up or down as needed + +KubeBlocks orchestrates scaling with minimal impact: +1. Secondary replicas update first +2. Primary updates last after secondaries are healthy +3. 
Cluster status transitions from `Updating` to `Running` + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a MongoDB ReplicaSet Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Vertical Scale + +**Expected Workflow**: + +1. Secondary replicas are updated first (one at a time) +1. Primary is updated last after secondary replicas are healthy +1. Cluster status transitions from `Updating` to `Running` + + + + Option 1: Using VerticalScaling OpsRequest + + Apply the following YAML to scale up the resources for the mongodb component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-vscale-ops + namespace: demo + spec: + clusterName: mongo-cluster + type: VerticalScaling + verticalScaling: + - componentName: mongodb + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + What Happens During Vertical Scaling? + - Secondary Pods are recreated first to ensure the primary Pod remains available. + - Once all secondary Pods are updated, the primary Pod is restarted with the new resource configuration. + + + You can check the progress of the scaling operation with the following command: + + ```bash + kubectl -n demo get ops mongo-cluster-vscale-ops -w + ``` + + Expected Result: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + mongo-cluster-vscale-ops VerticalScaling mongo-cluster Running 0/3 32s + mongo-cluster-vscale-ops VerticalScaling mongo-cluster Running 1/3 55s + mongo-cluster-vscale-ops VerticalScaling mongo-cluster Running 2/3 82s + mongo-cluster-vscale-ops VerticalScaling mongo-cluster Running 3/3 2m13s + ``` + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update `spec.componentSpecs.resources` field to the desired resources for vertical scale. 
+ + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: mongodb + replicas: 3 + resources: + requests: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + limits: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + ... + ``` + + + +## Best Practices & Considerations + +**Planning:** +- Scale during maintenance windows or low-traffic periods +- Verify Kubernetes cluster has sufficient resources +- Check for any ongoing operations before starting + +**Execution:** +- Maintain balanced CPU-to-Memory ratios +- Set identical requests/limits for guaranteed QoS + +**Post-Scaling:** +- Monitor resource utilization and application performance +- Consider adjusting MongoDB parameters if needed + +## Verification +Verify the updated resources by inspecting the cluster configuration or Pod details: +```bash +kbcli cluster describe mongo-cluster -n demo +``` + +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mongodb 1 / 1 1Gi / 1Gi data:20Gi +``` + +## Key Benefits of Vertical Scaling with KubeBlocks +- Seamless Scaling: Pods are recreated in a specific order to ensure minimal disruption. +- Dynamic Resource Adjustments: Easily scale CPU and memory based on workload requirements. +- Flexibility: Choose between OpsRequest for dynamic scaling or direct API updates for precise control. +- Improved Availability: The cluster remains operational during the scaling process, maintaining high availability. + +## Cleanup +To remove all created resources, delete the MongoDB ReplicaSet Cluster along with its namespace: +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +1. Deploy a MongoDB ReplicaSet Cluster managed by KubeBlocks. +2. 
Perform vertical scaling by increasing or decreasing resources for the mongodb component. +3. Use both OpsRequest and direct Cluster API updates to adjust resource allocations. + +Vertical scaling is a powerful tool for optimizing resource utilization and adapting to changing workload demands, ensuring your MongoDB ReplicaSet Cluster remains performant and resilient. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/03-horizontal-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..5e83b79c --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,271 @@ +--- +title: Horizontal Scaling of MongoDB Clusters with KubeBlocks +description: Learn how to perform horizontal scaling (scale-out and scale-in) on a MongoDB cluster managed by KubeBlocks using OpsRequest and direct Cluster API updates. +keywords: [KubeBlocks, MongoDB, Horizontal Scaling, Scale-Out, Scale-In, Kubernetes] +sidebar_position: 3 +sidebar_label: Horizontal Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Horizontal Scaling for MongoDB Clusters with KubeBlocks + +This guide explains how to perform horizontal scaling (scale-out and scale-in) on a MongoDB cluster managed by KubeBlocks. You'll learn how to use both **OpsRequest** and direct **Cluster API** updates to achieve this. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a MongoDB ReplicaSet Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + +## Scale-out (Add Replicas) + +**Expected Workflow**: + +1. New pod is provisioned, and transitions from `Pending` to `Running` with `secondary` role +2. Data synced from primary to new replica +3. 
Cluster status changes from `Updating` to `Running` + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale out the MongoDB cluster by adding 1 replica to mongodb component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-scale-out-ops + namespace: demo + spec: + clusterName: mongo-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: mongodb + # Specifies the replica changes for scaling in components + scaleOut: + # Specifies the replica changes for the component. + # add one more replica to current component + replicaChanges: 1 + ``` + + Monitor the progress of the scaling operation: + + ```bash + kubectl get ops mongo-cluster-scale-out-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + mongo-cluster-scale-out-ops HorizontalScaling mongo-cluster Running 0/1 9s + mongo-cluster-scale-out-ops HorizontalScaling mongo-cluster Running 1/1 20s + mongo-cluster-scale-out-ops HorizontalScaling mongo-cluster Succeed 1/1 20s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: mongodb + replicas: 4 # increase replicas to scale-out + ... + ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster mongo-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 4}]' + ``` + + + +### Verify Scale-Out + +After applying the operation, you will see a new pod created and the MongoDB cluster status goes from `Updating` to `Running`, and the newly created pod has a new role `secondary`. + +New replicas automatically join as secondary nodes. 
+```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster -L kubeblocks.io/role +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE ROLE +mongo-cluster-mongodb-0 2/2 Running 0 6m24s primary +mongo-cluster-mongodb-1 2/2 Running 0 7m19s secondary +mongo-cluster-mongodb-2 2/2 Running 0 5m57s secondary +mongo-cluster-mongodb-3 2/2 Running 0 3m54s secondary +``` + +Verify mongodb internal status using: + +1. login in any mongodb replica +```bash +kubectl exec -it -n demo mongo-cluster-mongodb-0 -- /bin/bash +mongosh "mongodb://${MONGODB_ROOT_USER}:${MONGODB_ROOT_PASSWORD}@127.0.0.1:27017/admin" +``` +2. check mongodb `rs.status()` +```bash +# login to mongodb and query +mongo-cluster-mongodb [direct: secondary] admin> rs.status() +``` + +## Scale-in (Remove Replicas) + +**Expected Workflow**: + +1. Selected replica (the one with the largest ordinal) is removed +2. If removing a primary replica, automatic switchover occurs first +3. Pod is terminated gracefully +4. Cluster status changes from `Updating` to `Running` + +:::note +If the replica being scaled-in happens to be a primary replica, KubeBlocks will trigger a Switchover actions. And this pod will not be terminated until this Switchover action succeeds. +::: + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale in the MongoDB cluster by removing ONE replica: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-scale-in-ops + namespace: demo + spec: + clusterName: mongo-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: mongodb + # Specifies the replica changes for scaling in components + scaleIn: + # Specifies the replica changes for the component. 
+      # remove one replica from current component
+      replicaChanges: 1
+  ```
+
+  Monitor progress:
+  ```bash
+  kubectl get ops mongo-cluster-scale-in-ops -n demo -w
+  ```
+
+  Expected Result:
+  ```bash
+  NAME                          TYPE                CLUSTER         STATUS    PROGRESS   AGE
+  mongo-cluster-scale-in-ops    HorizontalScaling   mongo-cluster   Running   0/1        8s
+  mongo-cluster-scale-in-ops    HorizontalScaling   mongo-cluster   Running   1/1        24s
+  mongo-cluster-scale-in-ops    HorizontalScaling   mongo-cluster   Succeed   1/1        24s
+  ```
+
+
+
+
+  Option 2: Direct Cluster API Update
+
+  Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource:
+
+  ```yaml
+  apiVersion: apps.kubeblocks.io/v1
+  kind: Cluster
+  spec:
+    componentSpecs:
+      - name: mongodb
+        replicas: 1 # decrease replicas to scale-in
+  ```
+
+  Or you can patch the cluster CR with command:
+
+  ```bash
+  kubectl patch cluster mongo-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 1}]'
+  ```
+
+
+
+
+### Verify Scale-In
+
+Example Output (ONE Pod):
+```bash
+kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster,apps.kubeblocks.io/component-name=mongodb
+NAME                      READY   STATUS    RESTARTS   AGE
+mongo-cluster-mongodb-0   2/2     Running   0          18m
+```
+
+## Troubleshooting
+If the scale-in operation gets stuck for quite a long time, please check these resources:
+
+```bash
+# Check agent logs on both current primary and candidate
+kubectl logs -n demo <primary-pod-name> -c kbagent
+kubectl logs -n demo <candidate-pod-name> -c kbagent
+
+# Check cluster events for errors
+kubectl get events -n demo --field-selector involvedObject.name=mongo-cluster
+
+# Check kubeblocks logs
+kubectl -n kb-system logs deploy/kubeblocks
+```
+
+If you get errors like the following from the primary replica:
+```text
+INFO Action Executed {"action": "switchover", "result": "exit code: 1: failed"}
+INFO HTTP API Called {"user-agent": "Go-http-client/1.1", "method": "POST", "path": "/v1.0/action", "status code": 200, "cost": 7}
+```
+
+It could be a
switchover error, and please check KubeBlocks logs for more details. + +## Best Practices + +When performing horizontal scaling: +- Scale during low-traffic periods when possible +- Monitor cluster health during scaling operations +- Verify sufficient resources exist before scaling out +- Consider storage requirements for new replicas + +## Cleanup +To remove all created resources, delete the MongoDB cluster along with its namespace: +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide you learned how to: +- Perform scale-out operations to add replicas to a MongoDB cluster. +- Perform scale-in operations to remove replicas from a MongoDB cluster. +- Use both OpsRequest and direct Cluster API updates for horizontal scaling. + +KubeBlocks ensures seamless scaling with minimal disruption to your database operations. diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/04-volume-expansion.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/04-volume-expansion.mdx new file mode 100644 index 00000000..bf1af3fc --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/04-volume-expansion.mdx @@ -0,0 +1,218 @@ +--- +title: Expanding Volume in a MongoDB Cluster +description: Learn how to expand Persistent Volume Claims (PVCs) in a MongoDB cluster managed by KubeBlocks without downtime. +keywords: [KubeBlocks, MongoDB, Volume Expansion, Kubernetes, PVC] +sidebar_position: 4 +sidebar_label: Volume Expansion +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Expanding Volume in a MongoDB Cluster + +This guide explains how to expand Persistent Volume Claims (PVCs) in a MongoDB cluster managed by **KubeBlocks**. Volume expansion enables dynamic storage capacity increases, allowing your database to scale seamlessly as data grows. When supported by the underlying storage class, this operation can be performed without downtime. 
+ +Volume expansion allows you to increase the size of a Persistent Volume Claim (PVC) after it has been created. This feature was introduced in Kubernetes v1.11 and became generally available (GA) in Kubernetes v1.24. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +### Check the Storage Class for Volume Expansion Support + +List all available storage classes and verify if volume expansion is supported by checking the `ALLOWVOLUMEEXPANSION` field: +```bash +kubectl get storageclass +``` + +Example Output: +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +Ensure the storage class you are using has `ALLOWVOLUMEEXPANSION` set to true. If it is false, the storage class does not support volume expansion. + +## Deploy a MongoDB ReplicaSet Cluster with StorageClass + +KubeBlocks uses a declarative approach to manage MongoDB clusters. Below is an example configuration for deploying a MongoDB cluster with 2 replicas (1 primary, 1 secondary). + +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mongo-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + # specify storage class name supports Volume Expansion + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**Explanation of Key Fields** +- `storageClassName`: Specifies `StorageClass` name that supports volume expansion. 
If not set, the StorageClass annotated `default` will be used. + +:::note +**ALLOWVOLUMEEXPANSION** + +Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`) when creating cluster. + +::: + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Expand volume + +:::note +1. Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`). +2. The new size must be larger than the current size. +3. Volume expansion may require additional configurations depending on the storage provider. +::: + +You can expand the volume in one of two ways: + + + + + Option 1: Using VolumeExpansion OpsRequest + + Apply the following YAML to increase the volume size for the mongodb component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-expand-volume-ops + namespace: demo + spec: + clusterName: mongo-cluster + type: VolumeExpansion + volumeExpansion: + - componentName: mongodb + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + Monitor the expansion progress with: + + ```bash + kubectl describe ops mongo-cluster-expand-volume-ops -n demo + ``` + + Expected Result: + ```bash + Status: + Phase: Succeed + ``` + Once completed, the PVC size will be updated. + + :::note + If the storage class you use does not support volume expansion, this OpsRequest fails fast with information like: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update the `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` field to the desired size. 
+ + ```yaml + componentSpecs: + - name: mongodb + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # specify new size, and make sure it is larger than current size + storage: 30Gi + ``` + KubeBlocks will automatically update the PVC size based on the new specifications. + + + +## Verification + +Verify the updated cluster configuration: +```bash +kbcli cluster describe mongo-cluster -n demo +``` +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mongodb 500m / 500m 512Mi / 512Mi data:30Gi +``` +The volume size for the data PVC has been updated to the specified value (e.g., 30Gi in this case). + +Confirm PVC resizing completion: +```bash +kubectl get pvc -l app.kubernetes.io/instance=mongo-cluster -n demo +``` +Expected Output: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +mongo-cluster-mongodb-data-0 Bound pvc-uuid 30Gi RWO 33m +mongo-cluster-mongodb-data-1 Bound pvc-uuid 30Gi RWO 33m +mongo-cluster-mongodb-data-2 Bound pvc-uuid 30Gi RWO 33m +``` + +## Cleanup +To remove all created resources, delete the MongoDB cluster along with its namespace: +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete ns demo +``` + +## Summary + +In this guide you learned how to: +1. Verify storage class compatibility for volume expansion. +2. Perform volume expansion using either: + - OpsRequest for dynamic updates. + - Cluster API for manual updates. +3. Verify the updated PVC size and ensure the resize operation is complete. + +With volume expansion, you can efficiently scale your MongoDB cluster's storage capacity without service interruptions, ensuring your database can grow alongside your application needs. 
+ + diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/05-manage-loadbalancer.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..ccb56a6a --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,333 @@ +--- +title: Create and Destroy MongoDB Service Using the Declarative Cluster API in KubeBlocks +description: Learn how to configure and manage MongoDB services in KubeBlocks for external and internal access using LoadBalancer and other service types. +keywords: [KubeBlocks, MongoDB, LoadBalancer, External Service, Expose, Kubernetes] +sidebar_position: 5 +sidebar_label: Manage MongoDB Services +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Manage MongoDB Services Using the Declarative Cluster API in KubeBlocks + +This guide provides step-by-step instructions for exposing MongoDB services managed by KubeBlocks, both externally and internally. You'll learn to configure external access using cloud provider LoadBalancer services, manage internal services, and properly disable external exposure when no longer needed. 
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a MongoDB ReplicaSet Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## View Network Services +List the Services created for the MongoDB cluster: +```bash +kubectl get service -l app.kubernetes.io/instance=mongo-cluster -n demo +``` + +Example Services: +```bash +# service for to all replicas +mongo-cluster-mongodb ClusterIP 10.96.249.157 27017/TCP 44m +# read-write service +mongo-cluster-mongodb-mongodb ClusterIP 10.96.17.58 27017/TCP 44m +# read-only servcie +mongo-cluster-mongodb-mongodb-ro ClusterIP 10.96.2.71 27017/TCP 44m +``` + +## Expose MongoDB Service + +External service addresses enable public internet access to MongoDB, while internal service addresses restrict access to the user's VPC. + +### Service Types Comparison + +| Type | Use Case | Cloud Cost | Security | +|------|----------|------------|----------| +| ClusterIP | Internal service communication | Free | Highest | +| NodePort | Development/testing | Low | Moderate | +| LoadBalancer | Production external access | High | Managed via security groups | + + + + + + + Option 1: Using OpsRequest + + To expose the MongoDB service externally using a LoadBalancer, create an OpsRequest resource: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: mongo-cluster + expose: + - componentName: mongodb + services: + - name: internet + # Determines how the Service is exposed. Defaults to 'ClusterIP'. + # Valid options are 'ClusterIP', 'NodePort', and 'LoadBalancer'. + serviceType: LoadBalancer + # Contains cloud provider related parameters if ServiceType is LoadBalancer. 
+ # Following is an example for AWS EKS + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + # Specifies a role to target with the service. + # If specified, the service will only be exposed to pods with the matching + # role. + roleSelector: primary + switch: Enable + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops mongo-cluster-expose-enable-ops -n demo + ``` + + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + mongo-cluster-expose-enable-ops Expose mongo-cluster Succeed 1/1 31s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the `spec.services` section in the Cluster resource to include a LoadBalancer service: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: mongo-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + # expose a external service + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + componentSelector: mongodb + name: mongodb-internet + serviceName: mongodb-internet + roleSelector: primary + spec: + ipFamilyPolicy: PreferDualStack + ports: + - name: mongodb + port: 27017 + protocol: TCP + targetPort: mongodb + type: LoadBalancer + componentSpecs: + ... + ``` + The YAML configuration above adds a new external service under the services section. This LoadBalancer service includes annotations for AWS Network Load Balancer (NLB). + + :::note + Cloud Provider Annotations + + When using a LoadBalancer service, you must include the appropriate annotations specific to your cloud provider. 
Below is a list of commonly used annotations for different cloud providers: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # Restricts the LoadBalancer to internal VPC access only. Defaults to internet-facing if not specified. + cloud.google.com/l4-rbs: "enabled" # Optimization for internet-facing LoadBalancer + ``` + + - Alibaba Cloud + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # Use "intranet" for internal-facing LoadBalancer + ``` + ::: + + + :::note + The `service.beta.kubernetes.io/aws-load-balancer-internal` annotation controls whether the LoadBalancer is internal or internet-facing. Note that this annotation cannot be modified dynamically after service creation. + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # Use "true" for internal VPC IPs + ``` + If you change this annotation from "false" to "true" after the Service is created, the annotation may update in the Service object, but the LoadBalancer will still retain its public IP. + + To properly modify this behavior: + - First, delete the existing LoadBalancer service. + - Recreate the service with the updated annotation (`service.beta.kubernetes.io/aws-load-balancer-internal`: "true"). + - Wait for the new LoadBalancer to be provisioned with the correct internal or external IP. 
+ ::: + + + Wait for the Cluster status to transition to Running using the following command: + ```bash + kubectl get cluster mongo-cluster -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + mongo-cluster mongodb Delete Running 18m + ``` + + + + +### Verify the Exposed Service +Check the service details to confirm the LoadBalancer service is created: + +```bash +kubectl get service -l app.kubernetes.io/instance=mongo-cluster -n demo +``` + +Example Output: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +mongo-cluster-mongodb-internet LoadBalancer 172.20.60.24 27017:31243/TCP 1m +``` + +### Wait for DNS Propagation + +The LoadBalancer DNS name may take 2-5 minutes to become resolvable. Verify the resolution status: + +```bash +nslookup # replace with the real IP from previous output. +``` + +## Connect to MongoDB Externally + +### Retrieve Credentials + +KubeBlocks automatically creates a Secret containing the MongoDB root credentials. Retrieve the MongoDB root credentials: +```bash +NAME=`kubectl get secrets -n demo mongo-cluster-mongodb-account-root -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo mongo-cluster-mongodb-account-root -o jsonpath='{.data.password}' | base64 -d` +``` + +### Connect Using MongoDB Client + +You can now connect to the MongoDB database externally (e.g., from your laptop or EC2): +```bash +mongosh "mongodb://<$NAME>:<$PASSWD>@:27017/admin" +``` + +## Disable External Exposure + + + + + + Option 1: Using OpsRequest + + To disable external access, create an OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-expose-disable-ops + namespace: demo + spec: + clusterName: mongo-cluster + expose: + - componentName: mongodb + services: + - name: internet + roleSelector: primary + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + Wait for the OpsRequest to 
complete:
+  ```bash
+  kubectl get ops mongo-cluster-expose-disable-ops -n demo
+  ```
+  Example Output:
+  ```bash
+  NAME                               TYPE     CLUSTER         STATUS    PROGRESS   AGE
+  mongo-cluster-expose-disable-ops   Expose   mongo-cluster   Succeed   1/1        24s
+  ```
+
+
+
+
+
+  Option 2: Using Cluster API
+
+  Alternatively, remove the `spec.services` field from the Cluster resource:
+  ```bash
+  kubectl patch cluster mongo-cluster -n demo --type=json -p='[
+  {
+    "op": "remove",
+    "path": "/spec/services"
+  }
+  ]'
+  ```
+
+  Monitor the cluster status until it is Running:
+  ```bash
+  kubectl get cluster mongo-cluster -n demo -w
+  ```
+
+  ```bash
+  NAME            CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS    AGE
+  mongo-cluster   mongodb              Delete               Running   24m
+  ```
+
+
+
+### Verify Service Removal
+
+Ensure that the 'mongo-cluster-mongodb-internet' Service is removed:
+
+```bash
+kubectl get service -l app.kubernetes.io/instance=mongo-cluster -n demo
+```
+
+Expected Result: The 'mongo-cluster-mongodb-internet' Service should be removed.
+
+## Cleanup
+To remove all created resources, delete the MongoDB cluster along with its namespace:
+```bash
+kubectl delete cluster mongo-cluster -n demo
+kubectl delete ns demo
+```
+
+## Summary
+This guide demonstrated how to:
+- Expose a MongoDB service externally or internally using KubeBlocks.
+- Configure LoadBalancer services with cloud provider-specific annotations.
+- Manage external access by enabling or disabling services via OpsRequest or direct updates to the Cluster API.
+
+KubeBlocks provides flexibility and simplicity for managing MongoDB services in Kubernetes environments.
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/08-switchover.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/08-switchover.mdx new file mode 100644 index 00000000..f924fb91 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/08-switchover.mdx @@ -0,0 +1,181 @@ +--- +title: MongoDB Cluster Switchover +description: Perform planned role transitions in MongoDB clusters with KubeBlocks for minimal downtime and controlled maintenance +keywords: [MongoDB, KubeBlocks, Switchover, High Availability, Role Transition, Kubernetes] +sidebar_position: 8 +sidebar_label: MongoDB Switchover +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# MongoDB Cluster Switchover + +A **switchover** is a planned operation that transfers the primary role from one MongoDB instance to another. Unlike failover which occurs during failures, switchover provides: +- Controlled role transitions +- Minimal downtime (typically a few hundred milliseconds) +- Predictable maintenance windows + +Switchover is ideal for: +- Node maintenance/upgrades +- Workload rebalancing +- Testing high availability +- Planned infrastructure changes + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a MongoDB Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Check Roles +List the Pods and their roles (primary or secondary): + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster,apps.kubeblocks.io/component-name=mongodb -L kubeblocks.io/role +``` + +Example Output: + +```text +NAME READY STATUS RESTARTS AGE ROLE +mongo-cluster-mongodb-0 2/2 Running 0 20m primary +mongo-cluster-mongodb-1 2/2 Running 0 21m secondary +mongo-cluster-mongodb-2 2/2 Running 0 19m secondary +``` + +## Performing a Planned Switchover + 
+To initiate a planned switchover, create an OpsRequest resource as shown below: + + + + Option 1: Automatic Switchover (No preferred candidate) + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongodb-switchover-ops + namespace: demo + spec: + clusterName: mongo-cluster + type: Switchover + switchover: + - componentName: mongodb + instanceName: mongo-cluster-mongodb-0 + ``` + **Key Parameters:** + - `instanceName`: Specifies the instance (Pod) that is primary or leader before a switchover operation. + + + + Option 2: Targeted Switchover (Specific candidate) + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongodb-switchover-targeted + namespace: demo + spec: + clusterName: mongo-cluster + type: Switchover + switchover: + - componentName: mongodb + # Specifies the instance whose role will be transferred. + # A typical usage is to transfer the leader role in a consensus system. + instanceName: mongo-cluster-mongodb-0 + # If CandidateName is specified, the role will be transferred to this instance. + # The name must match one of the pods in the component. + # Refer to ComponentDefinition's Swtichover lifecycle action for more details. + candidateName: mongo-cluster-mongodb-1 + ``` + + **Key Parameters:** + - `instanceName`: Specifies the instance (Pod) that is primary or leader before a switchover operation. + - `candidateName`: If candidate name is specified, the role will be transferred to this instance. + + + +## Monitoring the Switchover + +Monitor the switchover progress: + +```bash +kubectl get ops mongodb-switchover-ops -n demo -w +``` + +Expected Result: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +mongodb-switchover-ops Switchover mongo-cluster Succeed 1/1 33s +``` +## Verify the Switchover + +After the switchover is executed, the specified instance will be promoted to the primary role, while the previously primary instance will take on the secondary role. 
+ +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster -L kubeblocks.io/role +``` + +Expected Output: + +```text +NAME READY STATUS RESTARTS AGE ROLE +mongo-cluster-mongodb-0 2/2 Running 0 23m secondary +mongo-cluster-mongodb-1 2/2 Running 0 24m primary +mongo-cluster-mongodb-2 2/2 Running 0 23m secondary +``` + +In this example: +- Pod 'mongo-cluster-mongodb-1' has been promoted to the primary role. +- Pod 'mongo-cluster-mongodb-0' has transitioned to the secondary role. + +## Troubleshooting + +### Common Switchover Issues + +If the switchover operation gets stuck, check these resources: +```bash +# Check agent logs on both current primary and candidate +kubectl logs -n demo -c kbagent +kubectl logs -n demo -c kbagent + +# Check cluster events for errors +kubectl get events -n demo --field-selector involvedObject.name=mongo-cluster + +# Check kubeblocks logs +kubectl -n kb-system logs deploy/kubeblocks +``` + +## Summary + +This guide demonstrated how to: +1. Deploy a MongoDB HA cluster +2. Perform both automatic and targeted Switchover +3. 
Verify role transitions + +**Key takeaways:** +- Switchover enables controlled maintenance with minimal downtime (~100-500ms) +- KubeBlocks provides declarative operations for reliable role transitions +- Always verify: + - Cluster status immediately after switchover + - Application connectivity + - Replication health +- Check logs for troubleshooting: + - KubeBlocks operator (kb-system namespace) + - kbagent on database pods + diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/09-decommission-a-specific-replica.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..de10eb95 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,139 @@ +--- +title: Decommission a Specific Pod in KubeBlocks-Managed MongoDB Clusters +description: Learn how to decommission (take offline) a specific Pod in a MongoDB cluster managed by KubeBlocks. +keywords: [KubeBlocks, MongoDB, Decommission Pod, Horizontal Scaling, Kubernetes] +sidebar_position: 9 +sidebar_label: Decommission MongoDB Replica +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + +# Decommission a Specific Pod in KubeBlocks-Managed MongoDB Clusters + +This guide explains how to decommission (take offline) specific Pods in MongoDB clusters managed by KubeBlocks. Decommissioning provides precise control over cluster resources while maintaining availability. Use this for workload rebalancing, node maintenance, or addressing failures. + +## Why Decommission Pods with KubeBlocks? + +In traditional StatefulSet-based deployments, Kubernetes lacks the ability to decommission specific Pods. StatefulSets ensure the order and identity of Pods, and scaling down always removes the Pod with the highest ordinal number (e.g., scaling down from 3 replicas removes `Pod-2` first). 
This limitation prevents precise control over which Pod to take offline, which can complicate maintenance, workload distribution, or failure handling. + +KubeBlocks overcomes this limitation by enabling administrators to decommission specific Pods directly. This fine-grained control ensures high availability and allows better resource management without disrupting the entire cluster. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a MongoDB Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Decommission a Pod + +**Expected Workflow**: + +1. Replica specified in `onlineInstancesToOffline` is removed +2. Pod terminates gracefully +3. Cluster transitions from `Updating` to `Running` + +To decommission a specific Pod (e.g., 'mongo-cluster-mongodb-1'), you can use one of the following methods: + + + + + + Option 1: Using OpsRequest + + Create an OpsRequest to mark the Pod as offline: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-decommission-ops + namespace: demo + spec: + clusterName: mongo-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: mongodb + scaleIn: + onlineInstancesToOffline: + - 'mongo-cluster-mongodb-1' # Specifies the instance names that need to be taken offline + ``` + + #### Monitor the Decommissioning Process + Check the progress of the decommissioning operation: + + ```bash + kubectl get ops mongo-cluster-decommission-ops -n demo -w + ``` + Example Output: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + mongo-cluster-decommission-ops HorizontalScaling mongo-cluster Succeed 1/1 5s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the Cluster resource directly to decommission the Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: 
+ - name: mongodb + replicas: 1 # explected replicas after decommission + offlineInstances: + - mongo-cluster-mongodb-1 # <----- Specify Pod to be decommissioned + ... + ``` + + + + +### Verify the Decommissioning + +After applying the updated configuration, verify the remaining Pods in the cluster: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +mongo-cluster-mongodb-0 2/2 Running 0 25m +mongo-cluster-mongodb-2 2/2 Running 0 24m +``` + +Login to MongoDB replica and check : +```bash +# login to any mongodb replica: +mongo-cluster-mongodb [direct: secondary] admin> rs.status() +``` +Verify the change in `members`. + +## Summary +Key takeaways: +- Traditional StatefulSets lack precise Pod removal control +- KubeBlocks enables targeted Pod decommissioning +- Two implementation methods: OpsRequest or Cluster API + +This provides granular cluster management while maintaining availability. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/_category_.yml new file mode 100644 index 00000000..5c4d52e0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/04-operations/_category_.yml @@ -0,0 +1,4 @@ +position: 4 +label: Operations +collapsible: true +collapsed: false \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/01-create-backuprepo.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/01-create-backuprepo.mdx new file mode 100644 index 00000000..c14f8d53 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/01-create-backuprepo.mdx @@ -0,0 +1,125 @@ +--- +title: Create a Backup Repository for KubeBlocks +description: Learn how to create and configure a BackupRepo for KubeBlocks using an S3 bucket to store backup data. 
+keywords: [KubeBlocks, Backup, BackupRepo, S3, Kubernetes] +sidebar_position: 1 +sidebar_label: Create BackupRepo +--- + +# Create a BackupRepo for KubeBlocks + +This guide walks you through creating and configuring a BackupRepo in KubeBlocks using an S3 bucket for storing backup data. + +## Prerequisites +- AWS CLI configured with appropriate permissions to create S3 buckets. +- kubectl access to your Kubernetes cluster. +- KubeBlocks installed ([installation guide](../user_docs/overview/install-kubeblocks)) and running in the kb-system namespace. + +## Step 1: Create S3 Bucket + +Use the AWS CLI to create an S3 bucket in your desired region. Replace `` with your target AWS region (e.g., `us-east-1`, `ap-southeast-1`). + +```bash + aws s3api create-bucket --bucket kubeblocks-backup-repo --region --create-bucket-configuration LocationConstraint= +``` + +Example (for us-west-1): +```bash +aws s3api create-bucket \ + --bucket kubeblocks-backup-repo \ + --region us-west-1 \ + --create-bucket-configuration LocationConstraint=us-west-1 +``` + +Example Output: + +```json +{ +"Location": "http://kubeblocks-backup-repo.s3.amazonaws.com/" +} +``` + +Verification: +Confirm the bucket was created by listing its contents (it will be empty initially): + +```bash +aws s3 ls s3://kubeblocks-backup-repo +``` + +## Step 2: Create a Kubernetes Secret for AWS Credentials + +Store your AWS credentials securely in a Kubernetes Secret. Replace `` and `` with your actual AWS credentials: + +```bash +# Create a secret to save the access key +kubectl create secret generic s3-credential-for-backuprepo \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= \ + -n kb-system +``` + +## Step 3: Configure Backup Repository + +A BackupRepo is a custom resource that defines a storage repository for backups. In this step, you'll integrate your S3 bucket with KubeBlocks by creating a BackupRepo resource. + +Apply the following YAML to create the BackupRepo. 
Replace fields(e.g., bucket name, region) with your specific settings. + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupRepo +metadata: + name: s3-repo + annotations: + # mark this backuprepo as default one + dataprotection.kubeblocks.io/is-default-repo: 'true' +spec: + # Currently, KubeBlocks supports configuring various object storage services as backup repositories + # - s3 (Amazon Simple Storage Service) + # - oss (Alibaba Cloud Object Storage Service) + # - cos (Tencent Cloud Object Storage) + # - gcs (Google Cloud Storage) + # - obs (Huawei Cloud Object Storage) + # - minio, and other S3-compatible services. + storageProviderRef: s3 + # Specifies the access method of the backup repository. + # - Tool + # - Mount + accessMethod: Tool + # Specifies reclaim policy of the PV created by this backup repository. + pvReclaimPolicy: Retain + # Specifies the capacity of the PVC created by this backup repository. + volumeCapacity: 100Gi + # Stores the non-secret configuration parameters for the StorageProvider. + config: + bucket: kubeblocks-backup-repo + endpoint: '' + mountOptions: --memory-limit 1000 --dir-mode 0777 --file-mode 0666 + region: us-west-1 + # References to the secret that holds the credentials for the StorageProvider. + credential: + # name is unique within a namespace to reference a secret resource. + name: s3-credential-for-backuprepo + # namespace defines the space within which the secret name must be unique. + namespace: kb-system +``` + +## Step 4: Verify Backup Repository Status + +Check the status of the BackupRepo to ensure it is correctly initialized: + +```bash +kubectl get backuprepo s3-repo -w +``` + +Expected Status Flow: +```bash +NAME STATUS STORAGEPROVIDER ACCESSMETHOD DEFAULT AGE +s3-repo PreChecking s3 Tool true 5s +s3-repo Ready s3 Tool true 35s +``` + +Troubleshooting: + - If status becomes Failed: + - Verify bucket name and region match your S3 configuration. 
+ - Confirm AWS credentials in the Secret are correct. + - Check network connectivity between KubeBlocks and AWS S3. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/02-create-full-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/02-create-full-backup.mdx new file mode 100644 index 00000000..1ccf6c5c --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/02-create-full-backup.mdx @@ -0,0 +1,223 @@ +--- +title: Create a Full Backup for a MongoDB Cluster on KubeBlocks +description: Step-by-step guide to creating and validating full backups for MongoDB clusters using Backup API and OpsRequest API in KubeBlocks. +keywords: [MongoDB, Full Backup, KubeBlocks, Kubernetes, Database Backup, XtraBackup] +sidebar_position: 2 +sidebar_label: Create Full Backup +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Create a Full Backup for MongoDB on KubeBlocks + +This guide demonstrates how to create and validate full backups for MongoDB clusters on KubeBlocks using the `pg-basebackup` method through both: +- The Backup API (direct backup operations) +- The OpsRequest API (managed backup operations with enhanced monitoring) + +We will cover how to restore data from a backup in the [Restore From Full Backup](./05-restoring-from-full-backup) guide. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a MongoDB Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Backup Prerequisites + +Before creating backups, ensure: +1. Backup repository is configured: + - `BackupRepo` resource exists + - Network connectivity between cluster and repository + - `BackupRepo` status shows "Ready" + +2. 
Cluster is ready: + - Cluster status is "Running" + - No ongoing operations (scaling, upgrades, etc.) + +## Identify Backup Configuration + +Check available backup policies and schedules: + +```bash +# List backup policies +kubectl get backuppolicy -n demo -l app.kubernetes.io/instance=mongo-cluster + +# List backup schedules +kubectl get backupschedule -n demo -l app.kubernetes.io/instance=mongo-cluster +``` + +Expected Output: +```bash +NAME BACKUP-REPO STATUS AGE +mongo-cluster-mongodb-backup-policy Available 62m + +NAME STATUS AGE +mongo-cluster-mongodb-backup-schedule Available 62m +``` + +View supported backup methods in the BackupPolicy CR 'mongo-cluster-mongodb-backup-policy': + +```bash +kubectl get backuppolicy mongo-cluster-mongodb-backup-policy -n demo -oyaml | yq '.spec.backupMethods[].name' +``` +**List of Backup methods** + +KubeBlocks MongoDB supports these backup methods: + +| Feature | Method | Description | +|-------------|--------|------------| +| Full Backup | dump | Uses `mongodump`, a MongoDB utility used to create a binary export of the contents of a database | +| Full Backup | datafile | Backup the data files of the database | +| Continuous Backup | archive-oplog | Continuously archives MongoDB oplog using `wal-g` | + +## Backup via Backup API + +### 1. Create On-Demand Backup + +The `datafile` method backup the data files of the database + +Apply this manifest to create a backup: + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: Backup +metadata: + name: mongodb-backup-datafile + namespace: demo +spec: + # Specifies the backup method name that is defined in the backup policy. + # - dump + # - volume-snapshot + # - datafile + backupMethod: datafile + # Specifies the backup policy to be applied for this backup. + backupPolicyName: mongo-cluster-mongodb-backup-policy + # Determines whether the backup contents stored in the backup repository should be deleted when the backup custom resource(CR) is deleted. 
Supported values are `Retain` and `Delete`. + # - `Retain` means that the backup content and its physical snapshot on backup repository are kept. + # - `Delete` means that the backup content and its physical snapshot on backup repository are deleted. + deletionPolicy: Delete +``` + +### 2. Monitor Backup and Verify Completion + +Track progress until status shows "Completed": + +```bash +kubectl get backup mongodb-backup-datafile -n demo -w +``` + +Example Output: + +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +mongodb-backup-datafile mongo-cluster-mongodb-backup-policy datafile Running 1119698 Delete 2025-05-18T14:21:16Z +mongodb-backup-datafile mongo-cluster-mongodb-backup-policy datafile Running 1119698 Delete 2025-05-18T14:21:16Z +mongodb-backup-datafile mongo-cluster-mongodb-backup-policy datafile Completed 1119698 15s Delete 2025-05-18T14:21:16Z 2025-05-18T14:21:31Z +``` + +### 3. Validate Backup + +Confirm successful completion by checking: +- Backup status shows "Completed" +- Backup size matches expectations +- Check files in the BackupRepo + +The `Backup` resource records details including: +- Storage path +- Time range +- Backup file size + + +## Backup via OpsRequest API + +### 1. Create On-Demand Backup + +Execute a backup using the OpsRequest API with the 'pg-basebackup' method: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mongo-cluster-backup + namespace: demo +spec: + clusterName: mongo-cluster + force: false + backup: + backupPolicyName: mongo-cluster-mongodb-backup-policy + backupMethod: datafile + deletionPolicy: Delete + retentionPeriod: 1mo + type: Backup +``` + +### 2. Monitor Backup Progress + +#### 1. 
Monitor Operation Status + +Track backup progress in real-time: +```bash +kubectl get ops mongo-cluster-backup -n demo -w +``` + +Expected Output: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +mongo-cluster-backup Backup mongo-cluster Running -/- 5s +mongo-cluster-backup Backup mongo-cluster Succeed -/- 10s +``` + +- A STATUS of 'Succeed' indicates the backup operation completed successfully. + +#### 2. Verify Completion + +Check the final backup status: + +```bash +kubectl get backup -n demo -l operations.kubeblocks.io/ops-name=mongo-cluster-backup +``` + +Example Output: +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +backup-demo-mongo-cluster-20250518142234 mongo-cluster-mongodb-backup-policy datafile kb-oss Completed 1149575 11s Delete 2025-05-18T14:22:34Z 2025-05-18T14:22:44Z 2025-06-17T14:22:44Z +``` + +- The backup status should show 'Completed'. + +### 3. Validate Backup + +Confirm successful completion by checking: +- Backup status shows "Completed" +- Backup size matches expectations +- Files in the BackupRepo + +The `Backup` resource records details including: +- Storage path +- Time range +- Other metadata + +## Summary + +This guide covered: +1. Deploying a replication MongoDB cluster +2. Creating full backups using: + - Direct Backup API + - Managed OpsRequest API +3. Monitoring and validating backups + +Your MongoDB data is now securely backed up and ready for restoration when needed. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/03-scheduled-full-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/03-scheduled-full-backup.mdx new file mode 100644 index 00000000..b8bfc1f9 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/03-scheduled-full-backup.mdx @@ -0,0 +1,150 @@ +--- +title: Setting Up a MongoDB Cluster with Scheduled Backups in KubeBlocks +description: Learn how to deploy a MongoDB cluster using KubeBlocks and configure automated scheduled backups with retention in an S3 repository. +keywords: [MongoDB, Backup, KubeBlocks, Scheduled Backup, Kubernetes] +sidebar_position: 3 +sidebar_label: Scheduled Backups +--- + + +# Setting Up a MongoDB Cluster with Scheduled Backups in KubeBlocks + +This guide demonstrates how to deploy a MongoDB cluster using KubeBlocks and configure scheduled backups with retention in an S3 repository. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a MongoDB Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Prerequisites for Backup + +1. Backup Repository Configured: + - Configured `BackupRepo` + - Network connectivity between cluster and repo, `BackupRepo` status is `Ready` + +2. Cluster is Running: + - Cluster must be in `Running` state + - No ongoing operations (scaling, upgrades etc.) + +## Configure Scheduled Backups + +KubeBlocks automatically creates a `BackupSchedule` resource when the cluster is created. Follow these steps to enable and configure scheduled backups: + +1. 
Verify the default backup schedule configuration: + +```bash +kubectl get backupschedule mongo-cluster-mongodb-backup-schedule -n demo -oyaml +``` + +Example Output: +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupSchedule +spec: + backupPolicyName: mongo-cluster-MongoDB-backup-policy + schedules: + - backupMethod: datafile + # ┌───────────── minute (0-59) + # │ ┌───────────── hour (0-23) + # │ │ ┌───────────── day of month (1-31) + # │ │ │ ┌───────────── month (1-12) + # │ │ │ │ ┌───────────── day of week (0-6) (Sunday=0) + # │ │ │ │ │ + # 0 18 * * * + # schedule this job every day at 6:00 PM (18:00). + cronExpression: 0 18 * * * # update the cronExpression to your need + enabled: true # set to `true` to schedule base backup periodically + retentionPeriod: 7d # set the retention period to your need +``` + +2. Enable and customize the backup schedule: +```bash +kubectl edit backupschedule mongo-cluster-mongodb-backup-schedule -n demo +``` + +Update these key parameters: +- `enabled`: Set to `true` to activate scheduled backups +- `cronExpression`: Configure backup frequency using cron syntax +- `retentionPeriod`: Set how long to keep backups (e.g., `7d`, `1mo`) + +Example configuration for daily backups at 6PM UTC with 7-day retention: +```yaml +schedules: +- backupMethod: datafile + enabled: true + cronExpression: "0 18 * * *" + retentionPeriod: 7d +``` + +3. Verify the schedule configuration: +```bash +# Check schedule status +kubectl get backupschedule mongo-cluster-mongodb-backup-schedule -n demo -w + +# View detailed configuration +kubectl describe backupschedule mongo-cluster-mongodb-backup-schedule -n demo +``` + +## Monitoring and Managing Backups + +After enabling scheduled backups, monitor their execution and manage backup retention: + +1. View all backups: +```bash +kubectl get backup -n demo -l app.kubernetes.io/instance=mongo-cluster +``` + +2. Inspect backup details: +```bash +kubectl describe backup -n demo +``` + +3. 
Verify backup artifacts: +- Status should show "Completed" +- Check backup size matches expectations +- Confirm retention period is being applied +- Validate backup files exist in repository + +4. Manage backup retention: +- To manually delete old backups: +```bash +kubectl delete backup -n demo +``` +- To modify retention period: +```bash +kubectl edit backupschedule mongo-cluster-mongodb-backup-schedule -n demo +``` + +## Cleanup +To remove all created resources, delete the MongoDB cluster along with its namespace: + +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete ns demo +``` + +## Summary + +This guide demonstrated: +1. Configuration of automated MongoDB backups +2. Schedule customization using cron syntax +3. Retention policy management +4. Backup verification procedures + +Your MongoDB cluster now has: +- Regular automated backups +- Configurable retention policies +- Complete backup history tracking diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/04-scheduled-continuous-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/04-scheduled-continuous-backup.mdx new file mode 100644 index 00000000..6bb17df7 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/04-scheduled-continuous-backup.mdx @@ -0,0 +1,181 @@ +--- +title: Setting Up a MongoDB Cluster with Scheduled Continuous Backup in KubeBlocks +description: Learn how to set up a MongoDB cluster with scheduled full backups and continuous incremental backups enabled in KubeBlocks. 
+keywords: [MongoDB, Backup, PITR, KubeBlocks, Kubernetes] +sidebar_position: 4 +sidebar_label: Scheduled Continuous Backup +--- + +# Setting Up a MongoDB Cluster with Scheduled Continuous Backup Enabled in KubeBlocks + +This guide demonstrates how to configure a MongoDB cluster on KubeBlocks with: + +- Scheduled full backups (base backups) +- Continuous WAL (Write-Ahead Log) archiving +- Point-In-Time Recovery (PITR) capabilities + +This combination provides comprehensive data protection with minimal recovery point objectives (RPO). + +## What is PITR? +Point-In-Time Recovery (PITR) allows you to restore a database to a specific moment in time by combining full backups with continuous binlog/wal/archive log backups. + +For details on restoring data from both full backups and continuous binlog backups, refer to the [Restore From PITR](restore-with-pitr.mdx) guide. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Prerequisites for Backup + +1. Backup Repository Configured: + - Configured `BackupRepo` + - Network connectivity between cluster and repo, `BackupRepo` status is `Ready` + +2. Cluster is Running: + - Cluster must be in `Running` state + - No ongoing operations (scaling, upgrades etc.) 
+ +## List of Backup methods + +KubeBlocks MongoDB supports these backup methods: + +| Feature | Method | Description | +|-------------|--------|------------| +| Full Backup | dump | Uses `mongodump`, a MongoDB utility used to create a binary export of the contents of a database | +| Full Backup | datafile | Backup the data files of the database | +| Continuous Backup | archive-oplog | Continuously archives MongoDB oplog using `wal-g` | + +## Deploy a MongoDB ReplicaSet Cluster with Backup APIs + +Deploy a MongoDB ReplicaSet Cluster with 3 replicas and specify backup information: +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mongo-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + backup: + retentionPeriod: 7d + # for full backup + method: datafile # full backup methnod name + enabled: true + cronExpression: 0 18 * * * # full backup scheuler + # for continuous backup + continuousMethod: archive-oplog # continuous backup method, paired with method wal-g + pitrEnabled: true # enable continous method or not + repoName: s3-repo # specify backuprepo, if not specified, the BackupRepo annotated as `default` will be used. 
+``` + +**Key Configuration Fields Explained** + +| Field | Value | Description | +|-------|-------|-------------| +| `backup.enabled` | `true` | Enables scheduled backups | +| `method` | `datafile` | Full backup method using PostgreSQL's native utility | +| `cronExpression` | `0 18 * * *` | Daily full backup at 6PM UTC | +| `retentionPeriod` | `7d` | Retains backups for 7 days | +| `repoName` | `s3-repo` | Backup repository name (S3-compatible storage) | +| `pitrEnabled` | `true` | Enables continuous WAL archiving for PITR | +| `continuousMethod` | `archive-oplog` | Method for continuous WAL archiving | + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Monitoring Continuous Backups + +Verify continuous backup operation with these commands: +```bash +# get continuous backup +kubectl get backup -l app.kubernetes.io/instance=mongo-cluster,dataprotection.kubeblocks.io/backup-type=Continuous -n demo +# get pod working for continuous backup +kubectl get pod -l app.kubernetes.io/instance=mongo-cluster,dataprotection.kubeblocks.io/backup-type=Continuous -n demo +``` + +## Verifying Backup Configuration + +KubeBlocks automatically creates a `BackupSchedule` resource. Inspect the configuration: + +```bash +kubectl get backupschedule pg-cluster-postgresql-backup-schedule -n demo -oyaml +``` + +Example Output: +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupSchedule +... +spec: + backupPolicyName: mongo-cluster-mongodb-backup-schedule + schedules: + - backupMethod: datafile + cronExpression: 0 18 * * * + enabled: true # + retentionPeriod: 7d + - backupMethod: archive-oplog + cronExpression: '*/5 * * * *' + enabled: true # set to `true` to enable continuous backup + retentionPeriod: 8d # set the retention period to your need +``` + +1. **Full Backups** (datafile): + - Backup the data files of mongodb + - Runs on configured schedule (daily by default) + - Serves as base for PITR + +2. 
**Continuous Backups** (archive-oplog): + - Continuously archives MongoDB oplog using wal-g + - Uses datasafed as storage backend with zstd compression + - Maintains backup metadata including size and time ranges + - Automatically purges expired backups + - Verifies MongoDB primary status and process health + +## Summary + +This guide covered: +1. Configuring scheduled full backups with pg-basebackup +2. Enabling continuous WAL archiving with wal-g-archive +3. Setting up Point-In-Time Recovery (PITR) capabilities +4. Monitoring backup operations + +Key Benefits: +- Scheduled full backups ensure regular recovery points +- Continuous WAL archiving minimizes potential data loss +- PITR enables recovery to any moment in time \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/05-restoring-from-full-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/05-restoring-from-full-backup.mdx new file mode 100644 index 00000000..65945901 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/05-restoring-from-full-backup.mdx @@ -0,0 +1,161 @@ +--- +title: Restore a MongoDB Cluster from Backup +description: Learn how to restore a new MongoDB cluster from an existing backup in KubeBlocks using the Cluster Annotation or OpsRequest API. +keywords: [MongoDB, Restore, Backup, KubeBlocks, Kubernetes] +sidebar_position: 5 +sidebar_label: Restore MongoDB Cluster +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Restore a MongoDB Cluster from Backup + +This guide demonstrates two methods to restore a MongoDB cluster from backup in KubeBlocks: + +1. **Cluster Annotation Method** - Simple declarative approach using YAML annotations +2. 
**OpsRequest API Method** - Enhanced operational control with progress monitoring + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Preparing for Restoration: Locate one Full Backup +Before restoring, ensure that there is a full backup available. The restoration process will use this backup to create a new MongoDB cluster. + +- Backup repository accessible from new cluster +- Valid full backup in `Completed` state +- Adequate CPU/memory resources +- Sufficient storage capacity + +Find available full backups: + +```bash +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=mongo-cluster # get the list of full backups +``` + +Pick ONE of the Backups whose status is `Completed`. + +## Option 1: Cluster Annotation Restoration + +### Step 1: Create Restored Cluster +Create a new cluster with restore configuration: + +Key parameters: +- `kubeblocks.io/restore-from-backup` annotation +- Backup name and namespace located from the previous steps + + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mongo-cluster-restored + namespace: demo + annotations: + # NOTE: replace with your backup + kubeblocks.io/restore-from-backup: '{"mongodb":{"name":"","namespace":"demo","volumeRestorePolicy":"Parallel"}}' +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### Step 2: Monitor Restoration +Track restore progress with: + +```bash +# Watch restore status +kubectl get restore -n demo -w + +# Watch cluster status +kubectl get cluster -n demo -w +``` + +## Option 2: OpsRequest API Restoration + +### Step 1: Initiate Restore 
Operation +Create restore request via OpsRequest API: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mongo-cluster-restore + namespace: demo +spec: + clusterName: mongo-cluster-restored + force: false + restore: + backupName: + backupNamespace: demo + type: Restore +``` + +### Step 2: Track Operation Progress +Monitor restore status: + +```bash +# Watch restore status +kubectl get restore -n demo -w + +# Watch cluster status +kubectl get cluster -n demo -w +``` + +### Step 3: Validate Restored Cluster +Confirm successful restoration: +```bash +kubectl get cluster mongo-cluster-restored -n demo +``` +Example Output: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +mongo-cluster-restored mongodb Delete Running 3m3s +``` + + +## Cleanup +To remove all created resources, delete the MongoDB cluster along with its namespace: + +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete cluster mongo-cluster-restored -n demo +kubectl delete ns demo +``` + +## Summary + +This guide covered two restoration methods: + +1. **Cluster Annotation** - Simple YAML-based approach + - Retrieve system credentials + - Create cluster with restore annotation + - Monitor progress + +2. **OpsRequest API** - Enhanced operational control + - Create restore request + - Track operation status + - Verify completion diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/06-restore-with-pitr.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/06-restore-with-pitr.mdx new file mode 100644 index 00000000..a0fa8c40 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/06-restore-with-pitr.mdx @@ -0,0 +1,181 @@ +--- +title: Restore a MongoDB Cluster from Backup with Point-In-Time-Recovery(PITR) on KubeBlocks +description: Learn how to restore a MongoDB cluster using a full backup and continuous binlog backup for Point-In-Time Recovery (PITR) on KubeBlocks. 
+keywords: [MongoDB, Full Backup, PITR, KubeBlocks] +sidebar_position: 6 +sidebar_label: Restore with PITR +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Restore a MongoDB Cluster from Backup with Point-In-Time-Recovery(PITR) on KubeBlocks + +This guide demonstrates how to perform Point-In-Time Recovery (PITR) for MongoDB clusters in KubeBlocks using: + +1. A full base backup +2. Continuous WAL (Write-Ahead Log) backups +3. Two restoration methods: + - Cluster Annotation (declarative approach) + - OpsRequest API (operational control) + +PITR enables recovery to any moment within the `timeRange` specified. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Prepare for PITR Restoration +To perform a PITR restoration, both a full backup and continuous backup are required. Refer to the documentation to configure these backups if they are not already set up. + +- Completed full backup +- Active continuous WAL backup +- Backup repository accessible +- Sufficient resources for new cluster + +To identify the list of full and continuous backups, you may follow the steps: + +### 1. Verify Continuous Backup +Confirm you have a continuous WAL backup, either running or completed: + +```bash +# expect EXACTLY ONE continuous backup per cluster +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Continuous,app.kubernetes.io/instance=mongo-cluster +``` + +### 2. Check Backup Time Range +Get the valid recovery window: + +```bash +kubectl get backup -n demo -o yaml | yq '.status.timeRange' +``` + +Expected Output: +```text +start: "2025-05-07T09:12:47Z" +end: "2025-05-07T09:22:50Z" +``` + +### 3. 
Identify Full Backup
+Find available full backups that meet:
+- Status: Completed
+- Completion time after continuous backup start time
+
+```bash
+# expect one or more Full backups
+kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=mongo-cluster
+```
+
+:::tip
+KubeBlocks automatically selects the most recent qualifying full backup as the base.
+Make sure there is a full backup that meets this condition: its `stopTime`/`completionTimestamp` must be **AFTER** the continuous backup's `startTime`; otherwise PITR restoration will fail.
+:::
+
+## Option 1: Cluster Annotation Restoration
+
+### Step 1: Create Restored Cluster
+Configure PITR parameters in cluster annotation:
+
+Key parameters:
+- `name`: Continuous backup name
+- `restoreTime`: Target recovery time (within backup `timeRange`)
+
+Apply this YAML configuration:
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: mongo-cluster-restore-pitr
+  namespace: demo
+  annotations:
+    # NOTE: replace with the continuous backup name
+    # NOTE: replace with a valid time within the backup timeRange. 
+ kubeblocks.io/restore-from-backup: '{"mongodb":{"name":"","namespace":"demo","restoreTime":"","volumeRestorePolicy":"Parallel"}}' +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### Step 2: Monitor Restoration +Track restore progress with: + +```bash +# Watch restore status +kubectl get restore -n demo -w + +# Watch cluster status +kubectl get cluster -n demo -w +``` + +## Option 2: OpsRequest API Restoration + +For operational control and monitoring, use the OpsRequest API: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mongo-cluster-restore + namespace: demo +spec: + clusterName: mongo-cluster-restore + force: false + restore: + backupName: + backupNamespace: demo + restorePointInTime: + type: Restore +``` + +### Monitor Restoration +Track progress with: + +```bash +# Watch restore operation +kubectl get restore -n demo -w + +# Verify cluster status +kubectl get cluster -n demo -w +``` + +## Cleanup +To remove all created resources, delete the MongoDB cluster along with its namespace: + +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete cluster mongo-cluster-restore -n demo +kubectl delete ns demo +``` + +## Summary +This guide demonstrated how to restore a MongoDB cluster in KubeBlocks using a full backup and continuous backup for Point-In-Time Recovery (PITR). Key steps included: +- Verifying available backups. +- Extracting encrypted system account credentials. +- Creating a new MongoDB cluster with restoration configuration. +- Monitoring the restoration process. 
+ +With this approach, you can restore a MongoDB cluster to a specific point in time, ensuring minimal data loss and operational continuity. + diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/_category_.yml new file mode 100644 index 00000000..cd4faeaf --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/05-backup-restore/_category_.yml @@ -0,0 +1,4 @@ +position: 5 +label: Backup And Restores +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/06-custom-secret/01-custom-secret.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/06-custom-secret/01-custom-secret.mdx new file mode 100644 index 00000000..da36de8f --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/06-custom-secret/01-custom-secret.mdx @@ -0,0 +1,135 @@ +--- +title: Create a MongoDB Cluster with a Custom Root Password on KubeBlocks +description: Learn how to deploy a MongoDB cluster on KubeBlocks with a custom root password securely configured using Kubernetes Secrets. +keywords: [MongoDB, KubeBlocks, Custom Password, Kubernetes, Secrets] +sidebar_position: 1 +sidebar_label: Custom Password +--- + +# Create MongoDB Cluster With Custom Password on KubeBlocks + +This guide demonstrates how to deploy a MongoDB cluster in KubeBlocks with a custom root password stored in a Kubernetes Secret. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploying the MongoDB ReplicaSet Cluster + +KubeBlocks uses a declarative approach for managing MongoDB clusters. Below is an example configuration for deploying a MongoDB cluster with 2 nodes (1 primary, 1 replicas) and a custom root password. + +### Step 1: Create a Secret for the Defaults Account + +The custom root password is stored in a Kubernetes Secret. 
Create the Secret by applying the following YAML: + +```yaml +apiVersion: v1 +data: + password: Y3VzdG9tcGFzc3dvcmQ= # custompassword + username: cm9vdA== #root +immutable: true +kind: Secret +metadata: + name: custom-secret + namespace: demo +``` +- password: Replace custompassword with your desired password and encode it using Base64 (`echo -n "custompassword" | base64`). +- username: The default MongoDB root user is 'root', encoded as 'cm9vdA=='. + + +### Step 2: Deploy the MongoDB Cluster + +Apply the following manifest to deploy the MongoDB cluster, referencing the Secret created in Step 1 for the root account: +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mongo-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + systemAccounts: # override systemaccount password + - name: root + secretRef: + name: custom-secret + namespace: demo + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**Explanation of Key Fields** +- `systemAccounts`: Overrides system accounts defined in the referenced `ComponentDefinition`. + +:::tip + +In KubeBlocks MongoDB Addon, a list of system accounts is defined. And only those accounts can be customized with a new secret. + +::: + +To get the of accounts: +```bash +kubectl get cmpd mongodb-1.0.0 -oyaml | yq '.spec.systemAccounts[].name' +``` + +Expected Output: +```bash +root +``` + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Connecting to the MongoDB Cluster + +KubeBlocks automatically creates a secret containing the MongoDB root credentials. 
Retrieve the credentials with the following commands: + +```bash +kubectl get secrets -n demo mongo-cluster-mongodb-account-root -o jsonpath='{.data.password}' | base64 -d +custompassword +``` + +To connect to the cluster's primary node, use the MongoDB client with the custom password: +```bash +kubectl exec -it -n demo mongo-cluster-mongodb-0 -c mongodb -- mongosh "mongodb://root:custompassword@127.0.0.1:27017/admin" +``` + +## Cleanup +To remove all created resources, delete the MongoDB cluster along with its namespace: + +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete secret custom-secret -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you: +- Created a Kubernetes Secret to securely store a custom MongoDB root password. +- Deployed a MongoDB cluster in KubeBlocks with a custom root password. +- Verified the deployment and connected to the cluster's primary node using the MongoDB client. + +Using Kubernetes Secrets ensures secure credential management for your MongoDB clusters, while KubeBlocks simplifies the deployment and management process. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/06-custom-secret/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-mongodb/06-custom-secret/_category_.yml new file mode 100644 index 00000000..bf29dd85 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/06-custom-secret/_category_.yml @@ -0,0 +1,4 @@ +position: 6 +label: Custom Secret +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-mongodb/_category_.yml new file mode 100644 index 00000000..9ca610c2 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/_category_.yml @@ -0,0 +1,4 @@ +position: 12 +label: KubeBlocks for MongoDB Community Edition +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_category_.yml new file mode 100644 index 00000000..753acb79 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_category_.yml @@ -0,0 +1,5 @@ +position: 100 +label: tpl +collapsible: true +collapsed: false +hidden: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_create-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_create-cluster.mdx new file mode 100644 index 00000000..f52a13e0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_create-cluster.mdx @@ -0,0 +1,36 @@ +KubeBlocks uses a declarative approach for managing MongoDB Replication Clusters. +Below is an example configuration for deploying a MongoDB ReplicaSet Cluster with one primary replica and two secondary replicas. 
+ +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mongo-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_prerequisites.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..e632dc41 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. 
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_verify-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_verify-cluster.mdx new file mode 100644 index 00000000..ef8dcb1f --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mongodb/_tpl/_verify-cluster.mdx @@ -0,0 +1,33 @@ +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster mongo-cluster -n demo -w +``` + +Expected Output: + +```bash +kubectl get cluster mongo-cluster -n demo +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +mongo-cluster mongodb Delete Creating 49s +mongo-cluster mongodb Delete Running 62s +``` + +Check the pod status and roles: +```bash +kubectl get pods -l app.kubernetes.io/instance=mongo-cluster -L kubeblocks.io/role -n demo +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE ROLE +mongo-cluster-mongodb-0 2/2 Running 0 78s primary +mongo-cluster-mongodb-1 2/2 Running 0 63s secondary +mongo-cluster-mongodb-2 2/2 Running 0 48s secondary +``` + +Once the cluster status becomes Running, your MongoDB cluster is ready for use. + +:::tip +If you are creating the cluster for the very first time, it may take some time to pull images before running. + +::: diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/01-overview.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/01-overview.mdx new file mode 100644 index 00000000..e70e9879 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/01-overview.mdx @@ -0,0 +1,73 @@ +--- +title: Overview of KubeBlocks MySQL Addon +description: Learn about the features and capabilities of the KubeBlocks MySQL addon, including deployment topologies, lifecycle management, backup and restore, and supported versions. 
+keywords: [MySQL, KubeBlocks, operator, database, features, lifecycle management, backup, restore] +sidebar_position: 1 +sidebar_label: Overview +--- + +# Overview of KubeBlocks MySQL Addon + +The **KubeBlocks MySQL Addon** offers a comprehensive solution for deploying and managing MySQL clusters in Kubernetes. This document provides an overview of its features, including deployment topologies, lifecycle management options, backup and restore functionality, and supported MySQL versions. + +## Features + +### Topologies +The **KubeBlocks Operator** supports deploying MySQL in three different topologies, tailored to meet varying requirements for performance, consistency, and high availability: + + +| Features | Description +|-------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| SemiSync | Leverages MySQL’s semi-synchronous replication mechanism to achieve near-real-time data consistency.
• Requires at least one replica to acknowledge receipt of the transaction before the primary commits.
• Balances performance and consistency by reducing the chance of data loss in case of a primary failure. | +| MySQL Group Replication (MGR) | Creates a distributed, multi-primary MySQL cluster using MySQL’s native Group Replication.
• Ensures fault-tolerant operations and automatic data synchronization across all nodes.
• Provides built-in conflict detection and resolution for continuous database availability. | +| Orchestrator Integration | Integrates an external Orchestrator for high-availability (HA) management.
• Adds automated monitoring and failover capabilities, including replica promotion.
• Allows dynamic handling of node failures or degradations, reducing downtime. | + +With these options, you can tailor your MySQL deployment to your specific requirements for performance, consistency, and availability. + +### Lifecycle Management + +KubeBlocks provides robust lifecycle management features to simplify MySQL cluster operations: + +| Features | Description | +|-------------------------------|-------------------------------------------------------------------------------------------------------------------------| +| ProxySQL | Use ProxySQL as database load balancer and query router for connection management and read-write splitting | +| Read Replicas | Secondary replicas can provide read-only services | +| Horizontal scaling | Modifying the number of replicas will trigger scale-in and scale-out operations | +| Vertical scaling | Ability to adjust compute resources (CPU and memory) for MySQL replicas | +| Volume Expansion | Support dynamic expansion of persistent storage volumes for MySQL replicas | +| Restart | Controlled restart of the MySQL cluster while maintaining high availability | +| Stop/Start | Ability to stop and start the MySQL cluster for maintenance | +| Custom root password | Ability to set and manage custom root password for the MySQL cluster during creation | +| Custom configuration template | Providing customized MySQL configuration file templates during creation | +| Dynamic Parameter Changes | Reconfigure MySQL parameters dynamically without requiring a cluster restart | +| Expose Custom Services | Ability to expose database services with custom configurations for different access requirements | +| Switchover | Planned primary-secondary switch operation with minimal downtime | +| Decommission specific replica | Safely take a specific MySQL replica offline for maintenance or decommissioning | +| Rebuild specific replica | Recover a replica in place or recreate it as a new replica to restore functionality | +| Minor Version Upgrade | 
Perform minor version upgrades of MySQL clusters without impacting availability | +| Advanced Pod Management | Support for custom Pod resources, custom scheduling policies, gradual rolling updates, and more advanced operational controls | +| TLS Encryption | Support for enabling or disabling TLS encryption for secure database connections | +| Prometheus Integration | Integration with Prometheus managed by the Prometheus Operator for monitoring and alerting of MySQL metrics | +| Loki Stack Integration | Integrate with Loki Stack to collect MySQL error logs, audit logs, and slow query logs for better observability | + + + +### Backup and Restore + +| Features | Methods | Description | +|-------------|------------|-----------------------------------------------------------------| +| Full Backup | xtrabackup | Uses `xtrabackup` to perform full backups | +| Continuous Backup | archive-binlog | Support for continuous backups to enable point-in-time recovery | + +### Supported Versions + +| Major Versions | Minor Versions | +|---------------|----------------------------------| +| 5.7 | 5.7.44 | +| 8.0 | [8.0.30-8.0.39] | +| 8.4 | 8.4.0,8.4.1,8.4.2 | + +The list of supported versions can be found by following command: +``` +kubectl get cmpv mysql +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/02-quickstart.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/02-quickstart.mdx new file mode 100644 index 00000000..6f183644 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/02-quickstart.mdx @@ -0,0 +1,356 @@ +--- +title: Quickstart +description: Learn how to get started with KubeBlocks MySQL Add-on, including prerequisites, enabling the MySQL Add-on, creating a MySQL cluster, and managing it effectively, an alternative to dedicated operator. 
+keywords: [Kubernetes Operator, MySQL, KubeBlocks, Helm, Cluster Management, QuickStart] +sidebar_position: 2 +sidebar_label: Quickstart +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Quickstart + +This guide walks you through the process of getting started with the **KubeBlocks MySQL Add-on**, including prerequisites, enabling the add-on, creating a MySQL cluster, and managing the cluster with ease. + + +## Prerequisites + +This tutorial assumes that you have a Kubernetes cluster installed and running, and that you have installed the `kubectl` command line tool and `helm` somewhere in your path. Please see the [getting started](https://kubernetes.io/docs/setup/) and [Installing Helm](https://helm.sh/docs/intro/install/) for installation instructions for your platform. + +Also, this example requires KubeBlocks installed and running. Please see the [Install KubeBlocks](../user_docs/overview/install-kubeblocks) to install KubeBlocks. + + +### Enable MySQL Add-on + +Verify whether MySQL Addon is installed. By default, the MySQL Addon is installed along with the KubeBlocks Helm chart. +```bash +helm list -A +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +... +kb-addon-mysql kb-system 1 2024-12-16 00:28:52.78819557 +0000 UTC deployed mysql-1.0.0 5.7.44 +``` + +If MySQL Addon is not enabled, you can enable it by following the steps below. 
+ +```bash +# Add Helm repo +helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts +# For users in Mainland China, if github is not accessible or very slow for you, please use following repo instead +#helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + +# Update helm repo +helm repo update +# Search versions of the Addon +helm search repo kubeblocks/mysql --versions +# Install the version you want (replace $version with the one you need) +helm upgrade -i mysql kubeblocks-addons/mysql --version $version -n kb-system +``` + +## Create A MySQL Cluster + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/mysql/cluster.yaml +``` + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mysql-cluster + namespace: demo +spec: + # Specifies the behavior when a Cluster is deleted. + # Valid options are: [DoNotTerminate, Delete, WipeOut] (`Halt` is deprecated since KB 0.9) + # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. + # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. + # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. + terminationPolicy: Delete + # Specifies a list of ClusterComponentSpec objects used to define the + # individual Components that make up a Cluster. + # This field allows for detailed configuration of each Component within the Cluster + componentSpecs: + - name: mysql + # Specifies the ComponentDefinition custom resource (CR) that defines the + # Component's characteristics and behavior. 
+ # Supports three different ways to specify the ComponentDefinition: + # - the regular expression - recommended + # - the full name - recommended + # - the name prefix + componentDef: "mysql-8.0" # match all CMPD named with 'mysql-8.0-' + # ServiceVersion specifies the version of the Service expected to be + # provisioned by this Component. + # When componentDef is "mysql-8.0", + # Valid options are: [8.0.30,8.0.31,8.0.32,8.0.33,8.0.34,8.0.35,8.0.36,8.0.37,8.0.38,8.0.39] + serviceVersion: 8.0.35 + # Determines whether metrics exporter information is annotated on the + # Component's headless Service. + # Valid options are [true, false] + disableExporter: false + # Specifies the desired number of replicas in the Component + replicas: 2 + # Specifies the resources required by the Component. + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + # Specifies a list of PersistentVolumeClaim templates that define the storage + # requirements for the Component. + volumeClaimTemplates: + # Refers to the name of a volumeMount defined in + # `componentDefinition.spec.runtime.containers[*].volumeMounts + - name: data + spec: + # The name of the StorageClass required by the claim. 
+ # If not specified, the StorageClass annotated with + # `storageclass.kubernetes.io/is-default-class=true` will be used by default + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # Set the storage size as needed + storage: 20Gi +``` + +If you want to create a cluster of specified version, set `spec.componentSpecs.componentDef` (major version) and `spec.componentSpecs.serviceVersion` (major and minor version) field in the yaml file before applying it, for examples: + + + + +```yaml + componentSpecs: + - name: mysql + # componentDef is "mysql-5.7" means the major version is 5.7 + componentDef: "mysql-5.7" + # Valid options are: [5.7.44] + serviceVersion: 5.7.44 +``` + + + +```yaml + componentSpecs: + - name: mysql + # componentDef is "mysql-8.0" means the major version is 8.0 + componentDef: "mysql-8.0" + # Valid options are: [8.0.30,8.0.31,8.0.32,8.0.33,8.0.34,8.0.35,8.0.36,8.0.37,8.0.38,8.0.39] + serviceVersion: 8.0.35 +``` + + + +```yaml + componentSpecs: + - name: mysql + # componentDef is "mysql-8.4" means the major version is 8.4 + componentDef: "mysql-8.4" + # Valid options are: [8.4.0, 8.4.1, 8.4.2] + serviceVersion: 8.4.2 +``` + + + + + +The list of available componentDef can be found by following command: +```bash +kubectl get cmpd -l app.kubernetes.io/name=mysql +``` + +The list of supported versions can be found by following command: + +```bash +kubectl get cmpv mysql +``` + +When you create a MySQL cluster, KubeBlocks automatically creates a MySQL cluster that includes one primary replica and one secondary replica. The primary and secondary replicas are synchronized using semi-synchronous replication. + +When the cluster's status.phase changes to Running, it indicates that the cluster has been successfully created, and both the primary and secondary replicas have been started. 
+ +```bash +kubectl get cluster mysql-cluster +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +mysql-cluster Delete Running 22m + +kubectl get pods -l app.kubernetes.io/instance=mysql-cluster +NAME READY STATUS RESTARTS AGE +mysql-cluster-mysql-0 4/4 Running 0 31m +mysql-cluster-mysql-1 4/4 Running 0 31m +``` + +If you have installed `kbcli`, you can use the `kbcli` tool to quickly view important information related to the cluster. + +```bash +kbcli cluster describe mysql-cluster +Name: mysql-cluster Created Time: Dec 16,2024 08:37 UTC+0800 +NAMESPACE CLUSTER-DEFINITION VERSION STATUS TERMINATION-POLICY +default Running Delete + +Endpoints: +COMPONENT MODE INTERNAL EXTERNAL +mysql ReadWrite mysql-cluster-mysql.default.svc.cluster.local:3306 + +Topology: +COMPONENT INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql mysql-cluster-mysql-0 secondary Running ap-southeast-1b ip-10-0-2-243.ap-southeast-1.compute.internal/10.0.2.243 Dec 16,2024 08:37 UTC+0800 +mysql mysql-cluster-mysql-1 primary Running ap-southeast-1a ip-10-0-1-215.ap-southeast-1.compute.internal/10.0.1.215 Dec 16,2024 08:37 UTC+0800 + +Resources Allocation: +COMPONENT DEDICATED CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql false 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT TYPE IMAGE +mysql docker.io/apecloud/mysql:8.0.35 + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +Show cluster events: kbcli cluster list-events -n default mysql-cluster +``` + +## Connect to the MySQL Cluster + +When creating a MySQL Cluster, KubeBlocks creates a Secret named "mysql-cluster-mysql-account-root" to store the MySQL root username and password. 
+ +```bash +kubectl get secret -l app.kubernetes.io/instance=mysql-cluster +NAME TYPE DATA AGE +mysql-cluster-mysql-account-kbadmin Opaque 2 61s +mysql-cluster-mysql-account-kbdataprotection Opaque 2 61s +mysql-cluster-mysql-account-kbmonitoring Opaque 2 61s +mysql-cluster-mysql-account-kbprobe Opaque 2 61s +mysql-cluster-mysql-account-kbreplicator Opaque 2 61s +mysql-cluster-mysql-account-proxysql Opaque 2 61s +mysql-cluster-mysql-account-root Opaque 2 61s +``` + +You can obtain the MySQL root username and password from secret 'mysql-cluster-mysql-account-root' using the following two commands: + +```bash +kubectl get secret mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 --decode + +kubectl get secret mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 --decode +``` + +KubeBlocks by default creates a Service of type ClusterIP named "mysql-cluster-mysql" to access the MySQL Cluster. + +```bash +kubectl get svc -l app.kubernetes.io/instance=mysql-cluster +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +mysql-cluster-mysql ClusterIP 172.20.253.119 3306/TCP 153m +``` + +You can log in to a Pod within the Kubernetes cluster, such as the primary replica of the MySQL Cluster, and access the database through this Service. Within the same Kubernetes cluster, the ClusterIP is accessible. 
+
+```bash
+kubectl exec -ti -n default mysql-cluster-mysql-0 -- mysql -h mysql-cluster-mysql -uroot -pkni676X2W1
+```
+
+Alternatively, you can use the `kubectl port-forward` command to map port 3306 of the primary replica of the MySQL Cluster to port 3306 on your local machine:
+
+```bash
+kubectl port-forward svc/mysql-cluster-mysql 3306:3306 -n default
+Forwarding from 127.0.0.1:3306 -> 3306
+Forwarding from [::1]:3306 -> 3306
+```
+
+Then, open another shell and use the mysql command-line tool to connect to the local port 3306:
+
+```bash
+mysql -h 127.0.0.1 -P3306 -uroot -pkni676X2W1
+```
+
+`kubectl exec` and `kubectl port-forward` are intended for quickly testing the operator's functionality and should not be used in production environments. In production, you should use a Service to access the MySQL Cluster. If accessing the database from outside Kubernetes, a LoadBalancer or NodePort type Service that provides an EXTERNAL-IP is required. Refer to [Accessing MySQL Cluster](./04-operations/05-manage-loadbalancer) to configure the Service in your environment.
+
+## Stop the MySQL Cluster
+
+Stopping the cluster releases all the Pods of the cluster, but the PVC, Secret, ConfigMap, and Service resources are retained. This is useful when you want to reduce the cost of the cluster.
+
+```bash
+kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/mysql/stop.yaml
+```
+
+```yaml
+apiVersion: operations.kubeblocks.io/v1alpha1
+kind: OpsRequest
+metadata:
+  name: mysql-stop
+  namespace: demo
+spec:
+  # Specifies the name of the Cluster resource that this operation is targeting.
+  clusterName: mysql-cluster
+  type: Stop
+```
+
+Alternatively, you may stop the cluster by setting the `spec.componentSpecs.stop` field to true.
+ +```bash +kubectl edit cluster mysql-cluster +``` + +```yaml +spec: + componentSpecs: + - name: mysql + stop: true # set stop `true` to stop the component + replicas: 2 +``` + +## Start the Stopped MySQL Cluster + +Start the stopped cluster + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/mysql/start.yaml +``` + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mysql-start + namespace: demo +spec: + # Specifies the name of the Cluster resource that this operation is targeting. + clusterName: mysql-cluster + type: Start +``` + +Alternatively, you may start the stopped cluster by setting the `spec.componentSpecs.stop` field to false. + +```bash +kubectl edit cluster mysql-cluster +``` + +```yaml +spec: + componentSpecs: + - name: mysql + stop: false # set to `false` (or remove this field) to start the component + replicas: 2 +``` + +## Destroy the MySQL Cluster + +You can delete the Cluster using the following command: + +```bash +kubectl delete cluster mysql-cluster +``` + +The behavior when deleting the Cluster depends on the value of the terminationPolicy field: +- If the terminationPolicy value is DoNotTerminate, deleting the Cluster will not remove any resources related to the Cluster. +- If the terminationPolicy value is Delete, deleting the Cluster will remove all resources related to the Cluster, including PVC, Secret, ConfigMap, and Service. +- If the terminationPolicy value is WipeOut, deleting the Cluster will remove all resources related to the Cluster, including PVC, Secret, ConfigMap, and Service, as well as snapshots and backups in the external storage. + +In a testing environment, you can delete the Cluster using the following command to release all resources. 
+ +```bash +kubectl patch cluster mysql-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" +kubectl delete cluster mysql-cluster +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/01-semisync.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/01-semisync.mdx new file mode 100644 index 00000000..c39b617d --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/01-semisync.mdx @@ -0,0 +1,322 @@ +--- +title: Deploying a MySQL Semi-Synchronous Cluster with KubeBlocks +description: Learn how to deploy a MySQL semi-synchronous replication cluster using KubeBlocks. This guide covers configuration, verification, failover testing, and timeout configuration. +keywords: [KubeBlocks, MySQL, Semi-Synchronous Replication, Kubernetes, High Availability] +sidebar_position: 1 +sidebar_label: MySQL Semi-Synchronous Cluster +--- + +# Deploying a MySQL Semi-Synchronous Cluster with KubeBlocks + +**Semi-synchronous replication** improves data consistency between the primary and replica nodes by requiring the primary node to wait for acknowledgment from at least one replica before committing transactions. This guide walks you through the process of setting up a MySQL semi-synchronous replication cluster using KubeBlocks. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Deploying the MySQL Semi-Synchronous Cluster + +KubeBlocks uses a declarative approach for managing MySQL clusters. 
Below is an example configuration for deploying a MySQL cluster with 2 nodes (1 primary, 1 replicas) in semi-synchronous mode. + +Apply the following YAML configuration: +```yaml +kubectl apply -f - < + +Topology: +COMPONENT INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql example-mysql-cluster-mysql-0 primary Running ap-southeast-1a ip-10-0-1-93.ap-southeast-1.compute.internal/10.0.1.93 Dec 24,2024 09:09 UTC+0800 +mysql example-mysql-cluster-mysql-1 secondary Running ap-southeast-1b ip-10-0-2-253.ap-southeast-1.compute.internal/10.0.2.253 Dec 24,2024 09:09 UTC+0800 + +Resources Allocation: +COMPONENT DEDICATED CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql false 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT TYPE IMAGE +mysql docker.io/apecloud/mysql:8.0.35 + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +Show cluster events: kbcli cluster list-events -n default example-mysql-cluster +``` +### Verify Component Status +```bash +kubectl get component example-mysql-cluster-mysql -n demo +``` +Expected Output: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +example-mysql-cluster-mysql mysql-8.0-1.0.0 8.0.35 Running 2m28s +``` + +## Connecting to the MySQL Cluster + +KubeBlocks automatically creates a secret containing the MySQL root credentials. 
Retrieve the credentials with the following commands: + +```bash +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 -d +root + +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +z475N4c6ib +``` + +### Connect to the Primary Instance +To connect to the cluster's primary node, use the MySQL client: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql.demo.svc.cluster.local -uroot -pz475N4c6ib +``` + +## Testing Semi-Synchronous Replication + +In this section, we will test the semi-synchronous replication of the MySQL cluster by verifying the roles of the pods and checking their replication statuses. + +### 1. Verify Pod Roles +Identify the primary and replica instances by checking the roles of the pods: +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` +Expected Output: +```bash +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 secondary +``` + +### 2. Check Replication Status +#### Primary Node +Run the following command to check the semi-synchronous replication status on the primary node: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-0.example-mysql-cluster-mysql-headless.demo.svc.cluster.local -uroot -pz475N4c6ib -e "show status like 'Rpl%_status';" +``` +Example Output: +```sql ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| Rpl_semi_sync_replica_status | OFF | +| Rpl_semi_sync_source_status | ON | ++------------------------------+-------+ +``` +Explanation: +- "Rpl_semi_sync_source_status: ON": This indicates that the primary instance is configured for semi-synchronous replication as the source (or master). 
+- "Rpl_semi_sync_replica_status: OFF": This indicates that the primary instance is not acting as a replica in the replication setup. + +#### Replica Node +Check the semi-synchronous replication status on the replica node: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-1.example-mysql-cluster-mysql-headless.demo.svc.cluster.local -uroot -pz475N4c6ib -e "show status like 'Rpl%_status';" +``` +Example Output: +```sql ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| Rpl_semi_sync_replica_status | ON | +| Rpl_semi_sync_source_status | OFF | ++------------------------------+-------+ +``` +Explanation: +- "Rpl_semi_sync_replica_status: ON": This indicates that the secondary instance is acting as a semi-synchronous replica and is actively receiving and acknowledging changes from the primary instance. +- "Rpl_semi_sync_source_status: OFF": This indicates that the secondary instance is not acting as a source (or master) in the replication setup. + + +## Checking and Configuring Timeout + +Here’s an example command to check the current value of the 'rpl_semi_sync_source_timeout' variable. +This value is typically set via the 'SEMI_SYNC_TIMEOUT' environment variable. +If the 'SEMI_SYNC_TIMEOUT' environment variable is not explicitly set, the default value for 'rpl_semi_sync_source_timeout' is 10000 ms (10 seconds). 
+ +### Check Current Timeout + +In the following example, you can see that the value has been configured to 3000 ms: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql.demo.svc.cluster.local -uroot -pz475N4c6ib -e "show variables like 'rpl_semi_sync_source_timeout';" +``` +Expected Output: +```sql ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| rpl_semi_sync_source_timeout | 3000 | ++------------------------------+-------+ +``` + +### Update Timeout +To update the timeout, modify the cluster configuration and reapply the YAML file. For example: + +```yaml +kubectl apply -f - < +proxysql example-mysql-cluster-proxysql-proxy-ordinal-0.demo.svc.cluster.local:6032 + example-mysql-cluster-proxysql-proxy-ordinal-0.demo.svc.cluster.local:6033 + example-mysql-cluster-proxysql-proxy-ordinal-1.demo.svc.cluster.local:6032 + example-mysql-cluster-proxysql-proxy-ordinal-1.demo.svc.cluster.local:6033 + example-mysql-cluster-proxysql-proxy-server.demo.svc.cluster.local:6033 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql 8.0.35 example-mysql-cluster-mysql-0 primary Running ap-southeast-1b ip-10-0-2-221.ap-southeast-1.compute.internal/10.0.2.221 Feb 10,2025 08:32 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-1 secondary Running ap-southeast-1a ip-10-0-1-188.ap-southeast-1.compute.internal/10.0.1.188 Feb 10,2025 08:32 UTC+0800 +proxysql 2.4.4 example-mysql-cluster-proxysql-0 Running ap-southeast-1b ip-10-0-2-221.ap-southeast-1.compute.internal/10.0.2.221 Feb 10,2025 08:34 UTC+0800 +proxysql 2.4.4 example-mysql-cluster-proxysql-1 Running ap-southeast-1a ip-10-0-1-188.ap-southeast-1.compute.internal/10.0.1.188 Feb 10,2025 08:34 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 500m / 500m 512Mi / 512Mi data:20Gi +proxysql 
500m / 500m 512Mi / 512Mi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +mysql mysql-8.0-1.0.0 docker.io/apecloud/mysql:8.0.35 + docker.io/apecloud/mysqld-exporter:0.15.1 + apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:1.0.0 +proxysql proxysql-mysql-1.0.0 docker.io/apecloud/proxysql:2.4.4 + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +Show cluster events: kbcli cluster list-events -n demo example-mysql-cluster +``` + +## Connecting to the MySQL Cluster + +KubeBlocks automatically creates a secret containing the MySQL root credentials. Retrieve the credentials with the following commands: +```bash +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 -d +root + +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +22mue70Hx6 +``` + +### Connect via ProxySQL +Use ProxySQL to connect to the MySQL cluster: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-proxysql-proxy-server.demo.svc.cluster.local -P6033 -uroot -p22mue70Hx6 +``` +### Connect Directly to MySQL +Alternatively, connect directly to the MySQL instance: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql.demo.svc.cluster.local -uroot -p22mue70Hx6 +``` + +## Testing Semi-Synchronous Replication + +### Verify Pod Roles +List all pods in the cluster and check their roles: + +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` +Expected Output: +```bash +example-mysql-cluster-mysql-0 secondary +example-mysql-cluster-mysql-1 primary +``` + +### Check Replication Status +Verify the replication status for the primary and replica nodes: + +### Primary Node +Run the following command on the primary 
node: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-0.example-mysql-cluster-mysql-headless.demo.svc.cluster.local -uroot -p22mue70Hx6 -e "show status like 'Rpl%_status';" +``` +Expected Output: +```sql ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| Rpl_semi_sync_replica_status | ON | +| Rpl_semi_sync_source_status | OFF | ++------------------------------+-------+ +``` +Explanation: +- "Rpl_semi_sync_replica_status: ON": This indicates that the secondary instance is acting as a semi-synchronous replica and is actively receiving and acknowledging changes from the primary instance. +- "Rpl_semi_sync_source_status: OFF": This indicates that the secondary instance is not acting as a source (or master) in the replication setup. + +### Replica Node +Run the following command on the replica node: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-1.example-mysql-cluster-mysql-headless.demo.svc.cluster.local -uroot -p22mue70Hx6 -e "show status like 'Rpl%_status';" +``` +Expected Output: +```sql ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| Rpl_semi_sync_replica_status | OFF | +| Rpl_semi_sync_source_status | ON | ++------------------------------+-------+ +``` +Explanation: +- "Rpl_semi_sync_source_status: ON": This indicates that the primary instance is configured for semi-synchronous replication as the source (or master). +- "Rpl_semi_sync_replica_status: OFF": This indicates that the primary instance is not acting as a replica in the replication setup. 
+ +## Failover Testing +### Trigger a Failover +To test the failover mechanism, delete the primary pod: +```bash +kubectl delete pod example-mysql-cluster-mysql-1 -n demo +``` +This will trigger a failover, and the secondary instance will be promoted to the primary role. You can verify the new roles of the pods: +### Verify the Updated Roles +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` +Expected Output: +```bash +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 +``` +After some time, the deleted pod will be recreated and rejoin the cluster as a replica: +```bash +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 secondary +``` + +## Cleanup +To remove all created resources, delete the MySQL cluster along with its namespace: +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +- Deploy a MySQL semi-synchronous replication cluster with ProxySQL using KubeBlocks. +- Verify the cluster's roles and replication status. +- Test failover mechanisms for high availability. +By combining a MySQL semi-synchronous cluster with ProxySQL, you can achieve seamless failover, efficient traffic management, and enhanced reliability for production-grade deployments. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/03-mgr.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/03-mgr.mdx new file mode 100644 index 00000000..e2e5a204 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/03-mgr.mdx @@ -0,0 +1,209 @@ +--- +title: Deploying a MySQL Group Replication Cluster Using KubeBlocks +description: Learn how to deploy and manage a MySQL Group Replication (MGR) cluster using KubeBlocks. This guide covers configuration, verification, failover testing, and cleanup. 
+keywords: [KubeBlocks, MySQL, Group Replication, Kubernetes, High Availability]
+sidebar_position: 3
+sidebar_label: MySQL Group Replication Cluster
+---
+
+# Deploying a MySQL Group Replication Cluster Using KubeBlocks
+
+**MySQL Group Replication (MGR)** offers high availability and scalability by synchronizing data across multiple MySQL instances. It ensures that all nodes in the cluster participate in replication seamlessly, with automatic failover and self-healing capabilities. This guide walks you through deploying a MySQL Group Replication cluster using **KubeBlocks**, which simplifies the management and deployment of MySQL clusters in Kubernetes.
+
+## Prerequisites
+
+Before proceeding, ensure the following:
+- Environment Setup:
+  - A Kubernetes cluster is up and running.
+  - The kubectl CLI tool is configured to communicate with your cluster.
+  - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the linked installation instructions.
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial:
+
+```bash
+kubectl create ns demo
+namespace/demo created
+```
+
+## Deploying the MySQL Group Replication Cluster
+
+KubeBlocks uses a declarative approach to manage MySQL clusters. Below is an example configuration for deploying a MySQL Group Replication cluster with three nodes.
+ +Apply the following YAML configuration to deploy a MySQL Group Replication (MGR) cluster: +```yaml +kubectl apply -f - < + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql 8.0.35 example-mysql-cluster-mysql-0 primary Running ap-southeast-1c ip-10-0-3-155.ap-southeast-1.compute.internal/10.0.3.155 Feb 10,2025 22:23 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-1 secondary Running ap-southeast-1c ip-10-0-3-204.ap-southeast-1.compute.internal/10.0.3.204 Feb 10,2025 22:23 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-2 secondary Running ap-southeast-1c ip-10-0-3-75.ap-southeast-1.compute.internal/10.0.3.75 Feb 10,2025 22:23 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +mysql mysql-mgr-8.0-1.0.0 docker.io/apecloud/mysql:8.0.35 + docker.io/apecloud/mysqld-exporter:0.15.1 + apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:1.0.0 + +Show cluster events: kbcli cluster list-events -n demo example-mysql-cluster +``` + +## Checking Cluster Roles +To verify the roles of the MySQL instances ('primary' and 'secondary'), use the following command: +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` +Example Output: +```bash +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 secondary +example-mysql-cluster-mysql-2 secondary +``` + +## Connecting to the MySQL Cluster +KubeBlocks automatically creates a secret containing the MySQL root credentials. 
Retrieve the credentials with the following commands: + +```bash +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 -d +root + +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +q95G8nd87K +``` +### Connect to the Primary Node +To connect to the cluster's primary node, use the MySQL client: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql.demo.svc.cluster.local -uroot -pq95G8nd87K +``` + +## Check Group Replication Status + +Run the following query to check the status of the group replication cluster: +```sql +mysql> SELECT * FROM performance_schema.replication_group_members; +``` +Example Output: +```sql ++---------------------------+--------------------------------------+--------------------------------------------------------------------+-------------+--------------+-------------+----------------+----------------------------+ +| CHANNEL_NAME | MEMBER_ID | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION | MEMBER_COMMUNICATION_STACK | ++---------------------------+--------------------------------------+--------------------------------------------------------------------+-------------+--------------+-------------+----------------+----------------------------+ +| group_replication_applier | a17c375d-e7ba-11ef-8b01-3aa4e0d3963f | example-mysql-cluster-mysql-1.example-mysql-cluster-mysql-headless | 3306 | ONLINE | SECONDARY | 8.0.35 | XCom | +| group_replication_applier | a99688a7-e7ba-11ef-be5b-de475d052d4a | example-mysql-cluster-mysql-0.example-mysql-cluster-mysql-headless | 3306 | ONLINE | PRIMARY | 8.0.35 | XCom | +| group_replication_applier | c4403516-e7ba-11ef-8f11-8a79c903edf0 | example-mysql-cluster-mysql-2.example-mysql-cluster-mysql-headless | 3306 | ONLINE | SECONDARY | 8.0.35 | XCom | 
++---------------------------+--------------------------------------+--------------------------------------------------------------------+-------------+--------------+-------------+----------------+----------------------------+ +3 rows in set (0.00 sec) +``` +The roles in the output should match the roles shown in the kubectl output. + + +## Failover Testing +### Trigger a Failover +To test the MySQL Group Replication failover mechanism, delete the primary node: +```bash +kubectl delete pod example-mysql-cluster-mysql-0 -n demo +pod "example-mysql-cluster-mysql-0" deleted +``` +This triggers a failover, and one of the secondary nodes will be promoted to the primary role. + +### Verify the New Roles +Run the following command to check the updated roles: +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` +Example Output: +```bash +example-mysql-cluster-mysql-0 +example-mysql-cluster-mysql-1 secondary +example-mysql-cluster-mysql-2 primary +``` +Once the deleted pod ('example-mysql-cluster-mysql-0') is recreated, it will rejoin the cluster as a secondary node: +```bash +example-mysql-cluster-mysql-0 secondary +example-mysql-cluster-mysql-1 secondary +example-mysql-cluster-mysql-2 primary +``` +This demonstrates how the failover mechanism ensures high availability by automatically promoting a secondary instance to the primary role in case of failure. + +## Cleanup +To remove all created resources, delete the MySQL cluster along with its namespace: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +- Deploy a MySQL Group Replication cluster using KubeBlocks. +- Verify the cluster's state and role assignments. +- Connect to the primary node and check the replication status. +- Test the failover mechanism to ensure high availability. 
+By leveraging KubeBlocks, managing MySQL Group Replication clusters in Kubernetes becomes efficient and straightforward, enabling you to achieve high availability and scalability for your database workloads. diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/04-mgr-with-proxysql.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/04-mgr-with-proxysql.mdx new file mode 100644 index 00000000..38fa1758 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/04-mgr-with-proxysql.mdx @@ -0,0 +1,241 @@ +--- +title: Deploying a MySQL Group Replication Cluster with ProxySQL Using KubeBlocks +description: Learn how to deploy a MySQL Group Replication (MGR) cluster with ProxySQL integration using KubeBlocks. This guide covers configuration, verification, failover testing, and cleanup. +keywords: [KubeBlocks, MySQL, ProxySQL, Group Replication, High Availability, Kubernetes] +sidebar_position: 4 +sidebar_label: MySQL Group Replication with ProxySQL +--- + +# Deploying a MySQL Group Replication Cluster with ProxySQL Using KubeBlocks + +**MySQL Group Replication (MGR)** ensures high availability and fault tolerance by synchronizing data across multiple MySQL instances. It provides automatic failover, promoting a secondary node to primary in case of failure, ensuring continuous availability. + +**ProxySQL** is a high-performance MySQL proxy that acts as a middleware between MySQL clients and database servers. It provides features such as query routing, load balancing, query caching, and seamless failover. When combined with MGR, ProxySQL enhances cluster performance and enables efficient traffic management. + +This guide explains how to deploy a **MySQL Group Replication (MGR) cluster with ProxySQL integration** using **KubeBlocks**, simplifying the process of managing MySQL clusters in Kubernetes. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. 
+ - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Deploying the MySQL Group Replication Cluster + +KubeBlocks uses a declarative configuration approach to simplify MySQL cluster management. Below is an example configuration to deploy a MySQL Group Replication cluster with three MySQL nodes and two ProxySQL instances. + +Apply the following YAML configuration: +```yaml +kubectl apply -f - < +proxysql example-mysql-cluster-proxysql-proxy-ordinal-0.demo.svc.cluster.local:6032 + example-mysql-cluster-proxysql-proxy-ordinal-0.demo.svc.cluster.local:6033 + example-mysql-cluster-proxysql-proxy-ordinal-1.demo.svc.cluster.local:6032 + example-mysql-cluster-proxysql-proxy-ordinal-1.demo.svc.cluster.local:6033 + example-mysql-cluster-proxysql-proxy-server.demo.svc.cluster.local:6033 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql 8.0.35 example-mysql-cluster-mysql-0 primary Running ap-southeast-1c ip-10-0-3-34.ap-southeast-1.compute.internal/10.0.3.34 Feb 11,2025 12:47 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-1 secondary Running ap-southeast-1c ip-10-0-3-228.ap-southeast-1.compute.internal/10.0.3.228 Feb 11,2025 12:47 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-2 secondary Running ap-southeast-1c ip-10-0-3-187.ap-southeast-1.compute.internal/10.0.3.187 Feb 11,2025 12:47 UTC+0800 +proxysql 2.4.4 example-mysql-cluster-proxysql-0 Running ap-southeast-1c ip-10-0-3-228.ap-southeast-1.compute.internal/10.0.3.228 Feb 11,2025 12:49 UTC+0800 +proxysql 2.4.4 example-mysql-cluster-proxysql-1 Running ap-southeast-1c 
ip-10-0-3-187.ap-southeast-1.compute.internal/10.0.3.187 Feb 11,2025 12:49 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 500m / 500m 512Mi / 512Mi data:20Gi +proxysql 500m / 500m 512Mi / 512Mi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +mysql mysql-mgr-8.0-1.0.0 docker.io/apecloud/mysql:8.0.35 + docker.io/apecloud/mysqld-exporter:0.15.1 + apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:1.0.0 +proxysql proxysql-mysql-1.0.0 docker.io/apecloud/proxysql:2.4.4 + +Show cluster events: kbcli cluster list-events -n demo example-mysql-cluster +``` + +## Checking Cluster Roles +To verify the roles of MySQL instances (e.g., primary and secondary), run: +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` +Example Output: +```bash +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 secondary +example-mysql-cluster-mysql-2 secondary +``` + +## Connecting to the MySQL Cluster +KubeBlocks automatically creates a secret containing the MySQL root credentials. 
Retrieve the credentials with the following commands: + +```bash +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 -d +root + +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +XKNv07D612 +``` + +### Connect via ProxySQL +Use ProxySQL to connect to the MySQL cluster: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-proxysql-proxy-server.demo.svc.cluster.local -P6033 -uroot -pXKNv07D612 +``` + +### Connect Directly to MySQL +Alternatively, connect directly to the MySQL instance: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql.demo.svc.cluster.local -uroot -pXKNv07D612 +``` + +## Check Group Replication Status +To check the status of the Group Replication cluster, run the following query: +```sql +mysql> SELECT * FROM performance_schema.replication_group_members; +``` +Example Output: +```sql ++---------------------------+--------------------------------------+--------------------------------------------------------------------+-------------+--------------+-------------+----------------+----------------------------+ +| CHANNEL_NAME | MEMBER_ID | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION | MEMBER_COMMUNICATION_STACK | ++---------------------------+--------------------------------------+--------------------------------------------------------------------+-------------+--------------+-------------+----------------+----------------------------+ +| group_replication_applier | a17c375d-e7ba-11ef-8b01-3aa4e0d3963f | example-mysql-cluster-mysql-1.example-mysql-cluster-mysql-headless | 3306 | ONLINE | SECONDARY | 8.0.35 | XCom | +| group_replication_applier | a99688a7-e7ba-11ef-be5b-de475d052d4a | example-mysql-cluster-mysql-0.example-mysql-cluster-mysql-headless | 3306 | ONLINE | PRIMARY | 8.0.35 | 
XCom | +| group_replication_applier | c4403516-e7ba-11ef-8f11-8a79c903edf0 | example-mysql-cluster-mysql-2.example-mysql-cluster-mysql-headless | 3306 | ONLINE | SECONDARY | 8.0.35 | XCom | ++---------------------------+--------------------------------------+--------------------------------------------------------------------+-------------+--------------+-------------+----------------+----------------------------+ +3 rows in set (0.00 sec) +``` +The roles in the output should match the roles shown in the kubectl output. + + +## Failover Testing +### Trigger a Failover +To test the MySQL Group Replication failover mechanism, delete the primary node: +```bash +kubectl delete pod example-mysql-cluster-mysql-0 -n demo +pod "example-mysql-cluster-mysql-0" deleted +``` +This triggers a failover, and one of the secondary nodes will be promoted to the primary role. +### Verify the New Roles +Run the following command to check the updated roles: +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` +Example Output: +```bash +example-mysql-cluster-mysql-0 +example-mysql-cluster-mysql-1 secondary +example-mysql-cluster-mysql-2 primary +``` +Once the deleted pod ('example-mysql-cluster-mysql-0') is recreated, it will rejoin the cluster as a secondary node: +```bash +example-mysql-cluster-mysql-0 secondary +example-mysql-cluster-mysql-1 secondary +example-mysql-cluster-mysql-2 primary +``` +This demonstrates how the failover mechanism ensures high availability by automatically promoting a secondary instance to the primary role in case of failure. + +## Cleanup +To remove all created resources, delete the MySQL cluster along with its namespace: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +- Deploy a MySQL Group Replication cluster with ProxySQL using KubeBlocks. 
+- Verify the cluster’s deployment and role assignments. +- Connect to the cluster via ProxySQL or directly. +- Check the replication status and test failover mechanisms. +By combining MySQL Group Replication with ProxySQL, this setup ensures high availability, seamless failover, and efficient traffic management, making it ideal for production-grade deployments. diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/05-orchestrator.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/05-orchestrator.mdx new file mode 100644 index 00000000..f98815f1 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/05-orchestrator.mdx @@ -0,0 +1,370 @@ +--- +title: Deploying a MySQL Cluster and Orchestrator with KubeBlocks +description: A step-by-step guide to deploying a MySQL semi-synchronous replication cluster with Orchestrator using KubeBlocks. +keywords: [KubeBlocks, MySQL, Orchestrator, Kubernetes, DBaaS] +sidebar_position: 5 +sidebar_label: MySQL Cluster with Orchestrator +--- + +# Deploying a MySQL Cluster and Orchestrator with KubeBlocks + +Semi-synchronous replication improves data consistency between primary and replica nodes by requiring acknowledgment from at least one replica before committing transactions. + +Orchestrator is a robust MySQL High Availability (HA) and failover management tool. It provides automated monitoring, fault detection, and topology management for MySQL clusters, making it an essential component for managing large-scale MySQL deployments. With Orchestrator, you can: +- **Monitor Replication Topology**: Orchestrator continuously monitors the MySQL replication topology and provides a real-time view of the cluster's state. +- **Automated Failover**: In case of a primary node failure, Orchestrator automatically promotes a healthy replica to primary, ensuring minimal downtime. +- **Topology Management**: Orchestrator allows you to reconfigure, rebalance, and recover your MySQL topology with ease. 
+
+This guide walks you through the process of setting up a MySQL semi-synchronous replication cluster using **KubeBlocks**, alongside **Orchestrator** for effective failover and recovery management.
+
+## Prerequisites
+
+Before proceeding, ensure the following:
+- Environment Setup:
+  - A Kubernetes cluster is up and running.
+  - The kubectl CLI tool is configured to communicate with your cluster.
+  - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here.
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial:
+
+```bash
+kubectl create ns demo
+namespace/demo created
+```
+
+## Install the Orchestrator Addon
+
+1. View the Addon versions.
+```bash
+# including pre-release versions
+helm search repo kubeblocks/orchestrator --devel --versions
+```
+
+2. Install the Addon. Specify a version with '--version'.
+```bash
+helm install kb-addon-orc kubeblocks/orchestrator --namespace kb-system --create-namespace --version x.y.z
+```
+
+3. Verify whether this Addon is installed.
+```bash
+helm list -A
+```
+Expected Output:
+```bash
+orchestrator kb-system 1 2025-02-14 11:12:32.286516 +0800 CST deployed orchestrator-1.0.0 3.2.6
+```
+The STATUS is deployed and this Addon is installed successfully.
+
+## Deploying the MySQL Cluster with Orchestrator
+
+KubeBlocks uses a declarative approach for managing MySQL clusters. Below is an example configuration for deploying a MySQL cluster with 3 nodes (1 primary, 2 replicas) in semi-synchronous mode. Additionally, it creates an Orchestrator cluster using the Raft high-availability mode and configures the relationship between the MySQL semi-synchronous cluster and the Orchestrator cluster.
+ +Cluster Configuration +```yaml +kubectl apply -f - < + example-mysql-cluster-mysql-1.demo.svc.cluster.local:3306 + example-mysql-cluster-mysql-server.demo.svc.cluster.local:3306 +proxysql example-mysql-cluster-proxysql-proxy-ordinal-0.demo.svc.cluster.local:6032 + example-mysql-cluster-proxysql-proxy-ordinal-0.demo.svc.cluster.local:6033 + example-mysql-cluster-proxysql-proxy-ordinal-1.demo.svc.cluster.local:6032 + example-mysql-cluster-proxysql-proxy-ordinal-1.demo.svc.cluster.local:6033 + example-mysql-cluster-proxysql-proxy-server.demo.svc.cluster.local:6033 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql 8.0.35 example-mysql-cluster-mysql-0 primary Running ap-southeast-1c ip-10-0-3-233.ap-southeast-1.compute.internal/10.0.3.233 Mar 11,2025 10:21 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-1 secondary Running ap-southeast-1c ip-10-0-3-233.ap-southeast-1.compute.internal/10.0.3.233 Mar 11,2025 10:22 UTC+0800 +proxysql 2.4.4 example-mysql-cluster-proxysql-0 Running ap-southeast-1c ip-10-0-3-55.ap-southeast-1.compute.internal/10.0.3.55 Mar 11,2025 10:23 UTC+0800 +proxysql 2.4.4 example-mysql-cluster-proxysql-1 Running ap-southeast-1c ip-10-0-3-40.ap-southeast-1.compute.internal/10.0.3.40 Mar 11,2025 10:23 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 500m / 500m 512Mi / 512Mi +proxysql 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +mysql mysql-orc-8.0-1.0.0 docker.io/apecloud/mysql:8.0.35 + docker.io/apecloud/mysqld-exporter:0.15.1 + apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:1.0.0 +proxysql proxysql-mysql-1.0.0 docker.io/apecloud/proxysql:2.4.4 + +Show cluster events: kbcli cluster list-events -n demo example-mysql-cluster +``` + +To get detailed information about the Orchestrator cluster: + +```bash +kbcli cluster describe example-orc-cluster -n demo 
+``` +Example Output: +```bash +Name: example-orc-cluster Created Time: Mar 11,2025 10:21 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo orchestrator raft Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL +orchestrator example-orc-cluster-orchestrator.demo.svc.cluster.local:80 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +orchestrator 3.2.6 example-orc-cluster-orchestrator-0 primary Running ap-southeast-1c ip-10-0-3-55.ap-southeast-1.compute.internal/10.0.3.55 Mar 11,2025 10:21 UTC+0800 +orchestrator 3.2.6 example-orc-cluster-orchestrator-1 secondary Running ap-southeast-1c ip-10-0-3-233.ap-southeast-1.compute.internal/10.0.3.233 Mar 11,2025 10:21 UTC+0800 +orchestrator 3.2.6 example-orc-cluster-orchestrator-2 secondary Running ap-southeast-1c ip-10-0-3-55.ap-southeast-1.compute.internal/10.0.3.55 Mar 11,2025 10:22 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +orchestrator 500m / 500m 512Mi / 512Mi data:20Gi kb-default-sc + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +orchestrator orchestrator-raft docker.io/apecloud/orchestrator:v3.2.6 + +Show cluster events: kbcli cluster list-events -n demo example-orc-cluster +``` + +## Connecting to the MySQL Cluster +KubeBlocks automatically creates a secret containing the MySQL root credentials. 
Retrieve the credentials with the following commands: + +```bash +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 -d +root + +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +d3a5iS499Z +``` + +### Connect via ProxySQL +Use ProxySQL to connect to the MySQL cluster: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-proxysql-proxy-server.demo.svc.cluster.local -P6033 -uroot -pd3a5iS499Z +``` + +### Connect Directly to MySQL +Alternatively, connect directly to the MySQL instance: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-server.demo.svc.cluster.local -uroot -pd3a5iS499Z +``` + +## Testing Semi-Synchronous Replication + +In this section, we will test the semi-synchronous replication of the MySQL cluster by verifying the roles of the pods and checking their replication statuses. + +First, list all the pods in the cluster, along with their roles, to identify the primary and secondary instances: + +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` +Example Output: +``` +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 secondary +``` + +From the output, we can see the following: +- 'example-mysql-cluster-mysql-0' is the primary instance. +- 'example-mysql-cluster-mysql-1' is the secondary instance. +The 'kubeblocks.io/role' label helps us easily distinguish between the roles of the instances in the replication setup. + + +Next, connect to the primary instance ('example-mysql-cluster-mysql-0') and check its semi-synchronous replication status. 
Use the following command to execute a query inside the MySQL pod: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-0.demo.svc.cluster.local -uroot -pd3a5iS499Z -e "show status like 'Rpl%_status';" +mysql: [Warning] Using a password on the command line interface can be insecure. ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| Rpl_semi_sync_replica_status | OFF | +| Rpl_semi_sync_source_status | ON | ++------------------------------+-------+ +``` +Explanation: +- "Rpl_semi_sync_source_status: ON": This indicates that the primary instance is configured for semi-synchronous replication as the source (or master). +- "Rpl_semi_sync_replica_status: OFF": This indicates that the primary instance is not acting as a replica in the replication setup. + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-1.demo.svc.cluster.local -uroot -pd3a5iS499Z -e "show status like 'Rpl%_status';" +mysql: [Warning] Using a password on the command line interface can be insecure. ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| Rpl_semi_sync_replica_status | ON | +| Rpl_semi_sync_source_status | OFF | ++------------------------------+-------+ +``` +Explanation: +- "Rpl_semi_sync_replica_status: ON": This indicates that the secondary instance is acting as a semi-synchronous replica and is actively receiving and acknowledging changes from the primary instance. +- "Rpl_semi_sync_source_status: OFF": This indicates that the secondary instance is not acting as a source (or master) in the replication setup. + +## Failover Testing + +The following steps demonstrate how to trigger a failover in a MySQL cluster and verify the role changes of the pods. 
+
+To initiate a failover, delete the Pod currently assigned the primary role:
+
+```bash
+kubectl delete pod example-mysql-cluster-mysql-0 -n demo
+pod "example-mysql-cluster-mysql-0" deleted
+```
+
+This will trigger a failover, and the secondary instance will be promoted to the primary role.
+After a while, the killed pod will be recreated and will take the secondary role:
+
+```bash
+kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}'
+```
+Expected Output:
+```bash
+example-mysql-cluster-mysql-0 secondary
+example-mysql-cluster-mysql-1 primary
+```
+
+This process demonstrates how the failover mechanism ensures high availability by automatically promoting a secondary instance to the primary role in the event of a failure.
+
+## Cleanup
+To remove all created resources, delete the MySQL cluster along with its namespace:
+
+```bash
+kubectl delete cluster example-mysql-cluster -n demo
+kubectl delete cluster example-orc-cluster -n demo
+kubectl delete ns demo
+```
+
+## Summary
+
+This guide demonstrated how to deploy a MySQL cluster with semi-synchronous replication and integrate it with Orchestrator for high availability and failover management using KubeBlocks. With the declarative configuration approach, you can easily scale and manage MySQL clusters in Kubernetes environments.
diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/06-orchestrator-with-proxysql.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/06-orchestrator-with-proxysql.mdx
new file mode 100644
index 00000000..49a67248
--- /dev/null
+++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/06-orchestrator-with-proxysql.mdx
@@ -0,0 +1,375 @@
+---
+title: Deploying a MySQL Cluster with Orchestrator and ProxySQL using KubeBlocks
+description: Learn how to deploy a MySQL semi-synchronous replication cluster with Orchestrator and ProxySQL using KubeBlocks for high availability and efficient query routing.
+keywords: [KubeBlocks, MySQL, Orchestrator, ProxySQL, Kubernetes, DBaaS]
+sidebar_position: 6
+sidebar_label: MySQL with Orchestrator & ProxySQL
+---
+
+# Deploying a MySQL Cluster with Orchestrator and ProxySQL using KubeBlocks
+
+Semi-synchronous replication enhances data consistency between primary and replica nodes by requiring acknowledgment from at least one replica before committing transactions.
+
+This guide demonstrates how to deploy a MySQL cluster using **KubeBlocks** with **Orchestrator** for high availability and failover management, and **ProxySQL** for advanced query routing and load balancing. Together, these tools create a robust and efficient MySQL cluster infrastructure.
+
+### **What is Orchestrator?**
+
+Orchestrator is a powerful MySQL High Availability (HA) and failover management tool. It automates monitoring, fault detection, and topology management for MySQL clusters, making it ideal for managing large-scale deployments. Key features include:
+
+- **Replication Topology Monitoring**: Provides a real-time view of the MySQL replication topology.
+- **Automated Failover**: Promotes a healthy replica to primary in case of failure, ensuring minimal downtime.
+- **Topology Management**: Simplifies reconfiguration, rebalancing, and recovery of MySQL clusters.
+ +### **What is ProxySQL?** + +ProxySQL is a high-performance MySQL proxy that acts as a middleware between MySQL clients and database servers. It enhances cluster performance with features such as: + +- **Query Routing**: Directs queries to the appropriate servers based on their purpose (e.g., read or write). +- **Load Balancing**: Distributes traffic across replicas to optimize resource usage. +- **Query Caching**: Reduces database load by caching frequent queries. +- **Failover Support**: Seamlessly handles failover scenarios without interrupting application services. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Install the Orchestrator Addon + +1. View the Addon versions. +```bash +# including pre-release versions +helm search repo kubeblocks/orchestrator --devel --versions +``` + +2. Install the Addon. Specify a version with '--version'. +```bash +helm install kb-addon-orc kubeblocks/orchestrator --namespace kb-system --create-namespace --version x.y.z +``` + +3. Verify whether this Addon is installed. +```bash +helm list -A +``` +Expected Output: +```bash +orchestrator kb-system 1 2025-02-14 11:12:32.286516 +0800 CST deployed orchestrator-1.0.0 3.2.6 +``` +The STATUS is deployed and this Addon is installed successfully. + +## Deploying the MySQL Cluster with Orchestrator + +KubeBlocks provides a declarative approach to deploying MySQL clusters. 
Below is an example configuration for deploying a MySQL cluster with 2 nodes (1 primary and 1 replica) in semi-synchronous mode. This configuration also integrates Orchestrator (3 nodes) for failover management and ProxySQL (2 nodes) for query routing and load balancing. + +Cluster Configuration +```yaml +kubectl apply -f - < + example-mysql-cluster-mysql-1.demo.svc.cluster.local:3306 + example-mysql-cluster-mysql-server.demo.svc.cluster.local:3306 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql 8.0.35 example-mysql-cluster-mysql-0 primary Running ap-southeast-1c ip-10-0-3-68.ap-southeast-1.compute.internal/10.0.3.68 Mar 10,2025 16:43 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-1 secondary Running ap-southeast-1c ip-10-0-3-225.ap-southeast-1.compute.internal/10.0.3.225 Mar 10,2025 16:44 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +mysql mysql-orc-8.0-1.0.0 docker.io/apecloud/mysql:8.0.35 + docker.io/apecloud/mysqld-exporter:0.15.1 + apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:1.0.0 + +Show cluster events: kbcli cluster list-events -n demo example-mysql-cluster +``` + +To get detailed information about the Orchestrator cluster: + +```bash +kbcli cluster describe example-orc-cluster -n demo +``` +Example Output: +```bash +Name: example-orc-cluster Created Time: Mar 10,2025 16:43 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo orchestrator raft Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL +orchestrator example-orc-cluster-orchestrator.demo.svc.cluster.local:80 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +orchestrator 3.2.6 example-orc-cluster-orchestrator-0 primary Running ap-southeast-1c 
ip-10-0-3-225.ap-southeast-1.compute.internal/10.0.3.225 Mar 10,2025 16:43 UTC+0800 +orchestrator 3.2.6 example-orc-cluster-orchestrator-1 secondary Running ap-southeast-1c ip-10-0-3-68.ap-southeast-1.compute.internal/10.0.3.68 Mar 10,2025 16:43 UTC+0800 +orchestrator 3.2.6 example-orc-cluster-orchestrator-2 secondary Running ap-southeast-1c ip-10-0-3-225.ap-southeast-1.compute.internal/10.0.3.225 Mar 10,2025 16:44 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +orchestrator 500m / 500m 512Mi / 512Mi data:20Gi kb-default-sc + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +orchestrator orchestrator-raft docker.io/apecloud/orchestrator:v3.2.6 + +Show cluster events: kbcli cluster list-events -n demo example-orc-cluster +``` + +## Connecting to the MySQL Cluster +KubeBlocks automatically creates a secret containing the MySQL root credentials. Retrieve the credentials with the following commands: + +```bash +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 -d +root + +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +GX596H32Oz +``` + +To connect to the cluster's primary node, use the MySQL client: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-server.demo.svc.cluster.local -uroot -pGX596H32Oz +``` + +## Testing Semi-Synchronous Replication + +In this section, we will test the semi-synchronous replication of the MySQL cluster by verifying the roles of the pods and checking their replication statuses. 
+ +First, list all the pods in the cluster, along with their roles, to identify the primary and secondary instances: + +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` +Example Output: +``` +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 secondary +``` + +From the output, we can see the following: +- 'example-mysql-cluster-mysql-0' is the primary instance. +- 'example-mysql-cluster-mysql-1' is the secondary instance. + The 'kubeblocks.io/role' label helps us easily distinguish between the roles of the instances in the replication setup. + + +Next, connect to the primary instance ('example-mysql-cluster-mysql-0') and check its semi-synchronous replication status. Use the following command to execute a query inside the MySQL pod: +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-0.demo.svc.cluster.local -uroot -pGX596H32Oz -e "show status like 'Rpl%_status';" +mysql: [Warning] Using a password on the command line interface can be insecure. ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| Rpl_semi_sync_replica_status | OFF | +| Rpl_semi_sync_source_status | ON | ++------------------------------+-------+ +``` +Explanation: +- "Rpl_semi_sync_source_status: ON": This indicates that the primary instance is configured for semi-synchronous replication as the source (or master). +- "Rpl_semi_sync_replica_status: OFF": This indicates that the primary instance is not acting as a replica in the replication setup. + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-1.demo.svc.cluster.local -uroot -pGX596H32Oz -e "show status like 'Rpl%_status';" +mysql: [Warning] Using a password on the command line interface can be insecure. 
++------------------------------+-------+
+| Variable_name                | Value |
++------------------------------+-------+
+| Rpl_semi_sync_replica_status | ON    |
+| Rpl_semi_sync_source_status  | OFF   |
++------------------------------+-------+
+```
+Explanation:
+- "Rpl_semi_sync_replica_status: ON": This indicates that the secondary instance is acting as a semi-synchronous replica and is actively receiving and acknowledging changes from the primary instance.
+- "Rpl_semi_sync_source_status: OFF": This indicates that the secondary instance is not acting as a source (or master) in the replication setup.
+
+## Failover Testing
+
+The following steps demonstrate how to trigger a failover in a MySQL cluster and verify the role changes of the pods.
+
+To initiate a failover, delete the Pod currently assigned the primary role:
+
+```bash
+kubectl delete pod example-mysql-cluster-mysql-0 -n demo
+pod "example-mysql-cluster-mysql-0" deleted
+```
+
+This will trigger a failover, and the secondary instance will be promoted to the primary role.
+After a while, the killed pod will be recreated and will take the secondary role:
+
+```bash
+kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}'
+```
+Expected Output:
+```bash
+example-mysql-cluster-mysql-0 secondary
+example-mysql-cluster-mysql-1 primary
+```
+
+This process demonstrates how the failover mechanism ensures high availability by automatically promoting a secondary instance to the primary role in the event of a failure.
+ +## Cleanup +To remove all created resources, delete the MySQL cluster along with its namespace: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete cluster example-orc-cluster -n demo +kubectl delete ns demo +``` + +## Summary + +This guide demonstrated how to deploy a MySQL cluster with semi-synchronous replication, Orchestrator integration for high availability, and ProxySQL for query routing and load balancing using KubeBlocks. By leveraging the declarative configuration approach, you can easily scale and manage MySQL clusters in Kubernetes environments. diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/_category_.yml new file mode 100644 index 00000000..f041cfad --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/03-topologies/_category_.yml @@ -0,0 +1,4 @@ +position: 3 +label: Topologies +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/01-stop_start_restart.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/01-stop_start_restart.mdx new file mode 100644 index 00000000..67b20b88 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/01-stop_start_restart.mdx @@ -0,0 +1,267 @@ +--- +title: Managing MySQL Cluster Lifecycle (Stop, Start, and Restart) +description: Learn how to manage the lifecycle of a MySQL cluster in KubeBlocks, including stopping, starting, and restarting clusters to optimize resource usage and maintain flexibility. 
+keywords: [KubeBlocks, MySQL, Cluster Management, Stop, Start, Restart] +sidebar_position: 1 +sidebar_label: Lifecycle Management +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Managing MySQL Cluster Lifecycle + +This guide demonstrates how to manage the lifecycle of a MySQL cluster in **KubeBlocks**, including stopping, starting, and restarting the cluster. Proper lifecycle management helps optimize resource usage, reduce operational costs, and ensure flexibility in your Kubernetes environment. + + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Deploy a MySQL Semi-Synchronous Cluster + +KubeBlocks uses a declarative approach for managing MySQL clusters. Below is an example configuration for deploying a MySQL cluster with 2 nodes (1 primary, 1 replicas) in semi-synchronous mode. 
+ +Apply the following YAML configuration to deploy the cluster: + +```yaml +kubectl apply -f - < + + + +Option 1: Using OpsRequest + +You can stop the cluster using an OpsRequest: + +```yaml +kubectl apply -f - < + + + +Option 2: Using the Declarative Cluster API + +Alternatively, you may stop the cluster by setting the `spec.componentSpecs.stop` field to `true` in the cluster configuration: + +```bash +kubectl patch cluster example-mysql-cluster -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + } +]' +``` + + + + + +### Verifying Cluster Stop +Monitor the cluster's status to ensure it transitions to the Stopped state: +```bash +kubectl get cluster -n demo -w +``` +Example Output: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +example-mysql-cluster mysql Delete Stopping 93s +example-mysql-cluster mysql Delete Stopped 101s +``` + +There is no Pods running in the cluster, but the persistent storage is retained. +```bash +kubectl get pods -n demo +``` +Expected Output: +```bash +No resources found in demo namespace. +``` + +```bash +kubectl get pvc -n demo +``` +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE +data-example-mysql-cluster-mysql-0 Bound pvc-98ce87ab-acc6-4f95-8638-16e8052f98d8 20Gi RWO kb-default-sc 16m +data-example-mysql-cluster-mysql-1 Bound pvc-5bb87b23-7c38-45de-bf04-f2822051d897 20Gi RWO kb-default-sc 16m +``` + +### Start the Cluster + +Starting the cluster recreates the pods and brings the cluster back online. + + + + +Option 1: Using OpsRequest + +You can start the stopped cluster using an OpsRequest: + +```yaml +kubectl apply -f - < + + + +Option 1: Using the Declarative Cluster API + +Alternatively, you can start the cluster by: +- Setting the `spec.componentSpecs.stop` field to false, or +- Removing the `spec.componentSpecs.stop` field entirely. 
+ +```bash +kubectl patch cluster example-mysql-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } +]' +``` + + + + +### Verifying Cluster Start + +Monitor the cluster's status to ensure it transitions back to the Running state: + +```bash +kubectl get cluster -n demo -w +``` + +Example Output: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +example-mysql-cluster mysql Delete Updating 5m54s +example-mysql-cluster mysql Delete Running 6m6s +``` + + +### Restart the Cluster + +Restarting the cluster allows you to recreate the pods for specific components without deleting or stopping the entire cluster. + +#### Using OpsRequest + +To restart a specific component (e.g., mysql), use the following OpsRequest: + +```yaml +kubectl apply -f - < + + +Option 1: Using VerticalScaling OpsRequest + +Apply the following YAML to scale up the resources for the mysql component: + +```yaml +kubectl apply -f - < + + +Option 2: Direct Cluster API Update + +Alternatively, you may update `spec.componentSpecs.resources` field to the desired resources for vertical scale. + +```yaml +kubectl apply -f - < + + + +## Verification +Verify the updated resources by inspecting the cluster configuration or Pod details: +```bash +kbcli cluster describe example-mysql-cluster -n demo +``` +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 1 / 1 1Gi / 1Gi data:20Gi +``` + +## Key Benefits of Vertical Scaling with KubeBlocks +- Seamless Scaling: Pods are recreated in a specific order to ensure minimal disruption. +- Dynamic Resource Adjustments: Easily scale CPU and memory based on workload requirements. +- Flexibility: Choose between OpsRequest for dynamic scaling or direct API updates for precise control. +- Improved Availability: The cluster remains operational during the scaling process, maintaining high availability. 
+ +## Summary +In this guide, you learned how to: +1. Deploy a MySQL cluster managed by KubeBlocks. +2. Perform vertical scaling by increasing or decreasing resources for the mysql component. +3. Use both OpsRequest and direct Cluster API updates to adjust resource allocations. + +Vertical scaling is a powerful tool for optimizing resource utilization and adapting to changing workload demands, ensuring your MySQL cluster remains performant and resilient. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/03-horizontal-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..3794a187 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,251 @@ +--- +title: Horizontal Scaling of MySQL Clusters with KubeBlocks +description: Learn how to perform horizontal scaling (scale-out and scale-in) on a MySQL cluster managed by KubeBlocks using OpsRequest and direct Cluster API updates. +keywords: [KubeBlocks, MySQL, Horizontal Scaling, Scale-Out, Scale-In, Kubernetes] +sidebar_position: 3 +sidebar_label: Horizontal Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Horizontal Scaling for MySQL Clusters with KubeBlocks + +This guide explains how to perform horizontal scaling (scale-out and scale-in) on a MySQL cluster managed by KubeBlocks. You'll learn how to use both **OpsRequest** and direct **Cluster API** updates to achieve this. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. 
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Deploy a MySQL Semi-Synchronous Cluster + +Deploy a 2-node MySQL cluster (1 primary, 1 replica) with semi-synchronous replication: + +```yaml +kubectl apply -f - < + + + +### Option 1.: Using OpsRequest +Scale out the MySQL cluster by adding 1 replica: + +```yaml +kubectl apply -f - < + + + +### Option 2.: Direct Cluster API Update + +Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: +```yaml +kubectl patch cluster example-mysql-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 3}]' +``` + + + + + + +### Verify Scale-Out + +After applying the operation, you will see a new pod created and the MySQL cluster status goes from `Updating` to `Running`, and the newly created pod has a new role `secondary`. + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=example-mysql-cluster +``` + +Example Output (3 Pods): +```bash +NAME READY STATUS RESTARTS AGE +example-mysql-cluster-mysql-0 4/4 Running 0 4m30s +example-mysql-cluster-mysql-1 4/4 Running 0 4m30s +example-mysql-cluster-mysql-2 4/4 Running 0 49s +``` + +New replicas automatically join as secondary nodes. 
+keywords: [KubeBlocks, MySQL, Volume Expansion, Kubernetes, PVC] +sidebar_position: 4 +sidebar_label: Volume Expansion +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Expanding Volume in a MySQL Cluster + +This guide explains how to expand the Persistent Volume Claims (PVCs) in a MySQL cluster managed by **KubeBlocks**. Volume expansion allows you to increase storage capacity dynamically, ensuring your database can scale seamlessly as data grows. If supported by the underlying storage class, this operation can be performed without downtime. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Deploy a MySQL Semi-Synchronous Cluster + +Deploy a 2-node semi-sync MySQL cluster (1 primary, 1 secondary): + +```yaml +kubectl apply -f - < + + + +Option 1: Using VolumeExpansion OpsRequest + +Apply the following YAML to increase the volume size for the mysql component: + +```yaml +kubectl apply -f - < + + + +Option 2: Direct Cluster API Update + +Alternatively, you may update the `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` field to the desired size. 
+ +```yaml +kubectl apply -f - < + + + +## Verification + +Use the following command to inspect the updated cluster configuration: +```bash +kbcli cluster describe example-mysql-cluster -n demo +``` +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 500m / 500m 512Mi / 512Mi data:30Gi +``` +The volume size for the data PVC has been updated to the specified value (e.g., 30Gi in this case). + +Check the status of the PVCs in the cluster to confirm that the resize operation has completed: +```bash +kubectl get pvc -l app.kubernetes.io/instance=example-mysql-cluster -n demo +``` +Expected Output: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +example-mysql-cluster-mysql-data-0 Bound pvc-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx 30Gi RWO kb-default-sc 10m +example-mysql-cluster-mysql-data-1 Bound pvc-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx 30Gi RWO kb-default-sc 10m +``` + +## Key Considerations +1. Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`). +2. The new size must be larger than the current size. +3. Volume expansion may require additional configurations depending on the storage provider. + +## Summary +In this guide, you learned how to: +1. Verify storage class compatibility for volume expansion. +2. Perform volume expansion using either: + - OpsRequest for dynamic updates. + - Cluster API for manual updates. +3. Verify the updated PVC size and ensure the resize operation is complete. + +With volume expansion, you can efficiently scale your MySQL cluster's storage capacity without service interruptions, ensuring your database can grow alongside your application needs. 
+ + diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/05-manage-loadbalancer.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..16ee7924 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,411 @@ +--- +title: Create and Destroy MySQL Service Using the Declarative Cluster API in KubeBlocks +description: Learn how to configure and manage MySQL services in KubeBlocks for external and internal access using LoadBalancer and other service types. +keywords: [KubeBlocks, MySQL, LoadBalancer, External Service, Expose, Kubernetes] +sidebar_position: 5 +sidebar_label: Manage MySQL Services +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Manage MySQL Service Using the Declarative Cluster API in KubeBlocks + +This guide provides step-by-step instructions on how to expose a MySQL service managed by KubeBlocks, either externally or internally. You will learn how to configure external access using a cloud provider's LoadBalancer service, manage internal services, and correctly disable external exposure when no longer needed. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Deploy a MySQL Semi-Synchronous Cluster + +KubeBlocks uses a declarative approach for managing MySQL clusters. Below is an example configuration for deploying a MySQL cluster with 2 nodes (1 primary, 1 replicas) in semi-synchronous mode. 
+ +Cluster Configuration +```yaml +kubectl apply -f - < 3306/TCP 5m16s +example-mysql-cluster-mysql-headless ClusterIP None 3306/TCP,3601/TCP,9104/TCP,3501/TCP,3502/TCP,9901/TCP 5m16s +``` + +## Expose MySQL Service Externally or Internally + +External addresses allow public internet access to the MySQL service, while internal addresses restrict access to the user’s VPC. + +### Service Types Comparison + +| Type | Use Case | Cloud Cost | Security | +|----|---|----|---| +| ClusterIP | Internal service communication | Free | Highest| +| NodePort | Development/testing | Low | Moderate | +| LoadBalancer | Production external access | High | Managed via SG | + + + + + + +### Option 1.: Using OpsRequest + +To expose the MySQL service externally using a LoadBalancer, create an OpsRequest resource: + +```yaml +kubectl apply -f - < + + + +### Option 2.: Using Cluster API + +Alternatively, update the `spec.services` section in the Cluster resource to include a LoadBalancer service: +```yaml +kubectl apply -f - < + + + +### Verify the Exposed Service +Check the service details to confirm the LoadBalancer service is created: + +```bash +kubectl get services -n demo +``` + +Example Output: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +example-mysql-cluster-mysql ClusterIP 172.20.129.84 3306/TCP 4h39m +example-mysql-cluster-mysql-headless ClusterIP None 3306/TCP,3601/TCP,9104/TCP,3501/TCP,3502/TCP,9901/TCP 4h39m +example-mysql-cluster-mysql-internet LoadBalancer 172.20.60.24 a1d37733683d244e0bebad8559cbf24d-3728431cad3e24b5.elb.ap-southeast-1.amazonaws.com 3306:30985/TCP 13s +``` + +### Wait for DNS Propagation + +The LoadBalancer DNS name may take 2-5 minutes to become resolvable. 
Verify the resolution status: + +```bash +nslookup a1d37733683d244e0bebad8559cbf24d-3728431cad3e24b5.elb.ap-southeast-1.amazonaws.com +``` + +Example Output: +```bash +Server: 192.168.101.1 +Address: 192.168.101.1#53 + +Non-authoritative answer: +Name: a1d37733683d244e0bebad8559cbf24d-3728431cad3e24b5.elb.ap-southeast-1.amazonaws.com +Address: 54.251.110.4 +``` + + +## Connect to MySQL Externally + +### Retrieve Credentials + +KubeBlocks automatically creates a Secret containing the MySQL root credentials. Retrieve the MySQL root credentials: +```bash +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +KI260UK7E9 +``` + +### Connect Using MySQL Client + +You can now connect to the MySQL database externally (e.g., from your laptop or EC2): +```bash +mysql -h a1d37733683d244e0bebad8559cbf24d-3728431cad3e24b5.elb.ap-southeast-1.amazonaws.com -uroot -pKI260UK7E9 +``` + +## Disable External Exposure + + + + + +### Option 1: Using OpsRequest +To disable external access, create an OpsRequest: +```yaml +kubectl apply -f - < + + + +### Option 2: Using Cluster API + +Alternatively, remove the `spec.services` field from the Cluster resource: +```bash +kubectl patch cluster example-mysql-cluster -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } +]' +``` + +Monitor the cluster status until it is Running: +```bash +kubectl get cluster example-mysql-cluster -n demo -w +``` +``` +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +example-mysql-cluster mysql Delete Running 23m +``` + + + + + +### Verify Service Removal + +Ensure that the 'example-mysql-cluster-mysql-internet' Service is removed: + +```bash +kubectl get service -n demo +``` + +Expected Result: The 'example-mysql-cluster-mysql-internet' Service should be removed. + + +## Summary +This guide demonstrated how to: +- Expose a MySQL service externally or internally using KubeBlocks.
+- Configure LoadBalancer services with cloud provider-specific annotations. +- Manage external access by enabling or disabling services via OpsRequest or direct updates to the Cluster API. + +KubeBlocks provides flexibility and simplicity for managing MySQL services in Kubernetes environments. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/06-minior-version-upgrade.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/06-minior-version-upgrade.mdx new file mode 100644 index 00000000..7ff09f4f --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/06-minior-version-upgrade.mdx @@ -0,0 +1,216 @@ +--- +title: Upgrading the Minor Version of a MySQL Cluster in KubeBlocks +description: Learn how to deploy and upgrade a MySQL cluster managed by KubeBlocks with minimal downtime. +keywords: [KubeBlocks, MySQL, Upgrade, Rolling Upgrade, Kubernetes] +sidebar_position: 6 +sidebar_label: Minor Version Upgrade +--- + +# Upgrading the Minor Version of a MySQL Cluster in KubeBlocks + +This guide walks you through the deployment and minor version upgrade of a MySQL cluster managed by KubeBlocks, ensuring minimal downtime during the process. + +To minimize the impact on database availability, the upgrade process starts with the replicas (secondary instances). Once the replicas are upgraded, a switchover operation promotes one of the upgraded replicas to primary. The switchover process is very fast, typically completing in a few hundred milliseconds. After the switchover, the original primary instance is upgraded, ensuring minimal disruption to the application. + + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. 
+ - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Deploy a MySQL Semi-Synchronous Cluster + +Deploy a 2-node semi-sync MySQL cluster (1 primary, 1 secondary): + +```yaml +kubectl apply -f - < SHOW VARIABLES LIKE 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 83 | ++-----------------+-------+ +1 row in set (0.00 sec) + +mysql> SHOW VARIABLES LIKE 'performance_schema'; ++--------------------+-------+ +| Variable_name | Value | ++--------------------+-------+ +| performance_schema | OFF | ++--------------------+-------+ +1 row in set (0.00 sec) +``` + +## Dynamic Parameter Example: Modifying max_connections + +Dynamic parameters can be modified without restarting the database. For example, updating the 'max_connections' parameter allows more concurrent connections to the MySQL instance. + +The expected behavior is that after modifying the configuration, the new settings take effect immediately, without the need to restart the database. + +To update the 'max_connections' parameter from 83 to 100, apply the following Reconfiguring OpsRequest: +```yaml +kubectl apply -f - < SHOW VARIABLES LIKE 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 100 | ++-----------------+-------+ +1 row in set (0.00 sec) +``` + +The output confirms that the 'max_connections' parameter has been successfully updated to 100. + + +## Static Parameter Example: Modifying performance_schema + +Static parameters, such as 'performance_schema', require a database restart to take effect. In this example, we will set performance_schema to ON. 
+ +Create a Reconfigure OpsRequest. Apply the following OpsRequest YAML to update the 'performance_schema': + +```yaml +kubectl apply -f - < SHOW VARIABLES LIKE 'performance_schema'; ++--------------------+-------+ +| Variable_name | Value | ++--------------------+-------+ +| performance_schema | ON | ++--------------------+-------+ +1 row in set (0.00 sec) +``` + +## Cleanup +To remove all created resources, delete the MySQL cluster along with its namespace: +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete ns demo +``` + +## Summary +This guide demonstrated how to modify both dynamic (e.g., max_connections) and static (e.g., performance_schema) MySQL parameters using Reconfiguring OpsRequest in KubeBlocks. While dynamic changes take effect immediately, static changes require a database restart. By leveraging KubeBlocks' declarative and automated management, these configuration updates can be applied efficiently with minimal downtime. diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/08-switchover.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/08-switchover.mdx new file mode 100644 index 00000000..0343a83c --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/08-switchover.mdx @@ -0,0 +1,173 @@ +--- +title: Planned Switchover in a MySQL Cluster +description: Learn how to perform a planned switchover in a MySQL cluster using KubeBlocks to ensure minimal downtime and seamless role transitions. +keywords: [KubeBlocks, MySQL, Switchover, High Availability, Kubernetes] +sidebar_position: 8 +sidebar_label: Planned Switchover in MySQL +--- + +# Planned Switchover in a MySQL Cluster + +A **switchover** is a planned operation where the primary instance in a MySQL cluster proactively transfers its role to a secondary instance. 
Unlike an unplanned failover, which occurs during unexpected failures, a switchover ensures a controlled and predictable role transition with minimal service disruption. + +## **Benefits of Switchover** +1. **Minimal Downtime**: The primary instance actively transfers its role to the secondary instance, resulting in very short service downtime (typically a few hundred milliseconds) +2. **Controlled Transition**: Ensures a seamless and predictable role change compared to failover, which involves detecting and recovering from a failure, often causing longer delays (several seconds or more). +3. **Maintenance-Friendly**: Ideal for planned maintenance tasks, such as node upgrades or decommissioning, while ensuring uninterrupted service. + +## **Switchover vs. Failover** + +| **Aspect** | **Switchover** | **Failover** | +|-----------------------------|-------------------------------------------|---------------------------------------| +| **Initiation** | Planned and manually triggered | Unplanned and automatically triggered| +| **Downtime** | Few hundred milliseconds | Several seconds or more | +| **Primary Role Transition** | Proactively transferred | Reactively promoted | +| **Use Case** | Planned maintenance (e.g., upgrades) | Handling unexpected failures | + +Using a switchover ensures smooth transitions and minimal service disruption, making it the preferred choice for planned maintenance activities. + + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. 
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Deploy a MySQL Semi-Synchronous Cluster + +Deploy a 2-node semi-sync MySQL cluster (1 primary, 1 secondary): + +```yaml +kubectl apply -f - < + + + +### Option 1.: Using OpsRequest +Create an OpsRequest to mark the Pod as offline: + +```yaml +kubectl apply -f - < + + + +### Option 2.: Using Cluster API +Alternatively, update the Cluster resource directly to decommission the Pod: + +```yaml +kubectl apply -f - < + + + +### Verify the Decommissioning + +After applying the updated configuration, verify the remaining Pods in the cluster: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=example-mysql-cluster +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +example-mysql-cluster-mysql-0 4/4 Running 0 6m38s +example-mysql-cluster-mysql-2 4/4 Running 0 6m38s +``` + +## Summary +In this guide, you learned: +- The limitations of traditional StatefulSet-based scaling in Kubernetes. +- How KubeBlocks enables precise decommissioning of specific Pods. +- Two methods to decommission a Pod: using OpsRequest or directly updating the Cluster API. + +By leveraging KubeBlocks, you can manage MySQL clusters with fine-grained control, ensuring high availability and flexibility for maintenance and workload distribution. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/11-rebuild-replica.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/11-rebuild-replica.mdx new file mode 100644 index 00000000..3c4e6b6c --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/11-rebuild-replica.mdx @@ -0,0 +1,399 @@ +--- +title: Recovering MySQL Replica in KubeBlocks +description: How to recover a MySQL replica in a semi-synchronous cluster managed by KubeBlocks using in-place and non-in-place repair methods. 
+keywords: [KubeBlocks, MySQL, Replica Recovery, In-Place Repair, Non-In-Place Repair] +sidebar_position: 11 +sidebar_label: Recovering MySQL Replica +--- + +# Recovering MySQL Replica in KubeBlocks + +This guide demonstrates how to perform the following tasks in a MySQL semi-synchronous cluster managed by KubeBlocks: +- Write a record to the primary instance and verify replication on the replica. +- Stop HA, break replication, modify data on the replica, and remove replication. +- Rebuild the replica using both 'in-place' repair and 'non-in-place' repair methods. +- Verify data recovery on the replica. + +> **Note**: Above steps are intended for testing purpose only. Disabling HA, breaking replication, and modifying data on a replica can compromise database consistency. Do not perform these operations on a production database. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Deploy a MySQL Semi-Synchronous Cluster + +KubeBlocks uses a declarative approach for managing MySQL clusters. Below is an example configuration for deploying a MySQL cluster with 2 nodes (1 primary, 1 replicas) in semi-synchronous mode. 
+ +Cluster Configuration +```yaml +kubectl apply -f - < CREATE DATABASE test; +mysql> USE test; +mysql> CREATE TABLE t1 (id INT PRIMARY KEY, name VARCHAR(255)); +mysql> INSERT INTO t1 VALUES (1, 'John Doe'); +``` + +### Step 3: Verify Data Replication +Connect to the replica instance (example-mysql-cluster-mysql-0) to verify that the data has been replicated: +```bash +kubectl exec -ti -n demo example-mysql-cluster-mysql-0 -- mysql -uroot -pR0z5Z1DS02 +``` +Note: If the primary instance is 'example-mysql-cluster-mysql-0', you should connect to 'example-mysql-cluster-mysql-1' instead. Make sure to check the role of each instance before connecting. + +```sql +mysql> SELECT * FROM test.t1; +``` + +Example Output: +```bash ++----+----------+ +| id | name | ++----+----------+ +| 1 | John Doe | ++----+----------+ +``` + +## Break HA & Replication and Modify the Replica + +### Step 1: Disable HA + +Fetch the HA configuration: + +```bash +kubectl get configmap -n demo example-mysql-cluster-mysql-haconfig -o yaml +``` +Expected Output: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + annotations: + MaxLagOnSwitchover: "10" + enable: "true" + ttl: "15" +``` + +Patch the ConfigMap to disable HA: + +```bash +kubectl patch configmap -n demo example-mysql-cluster-mysql-haconfig --type merge -p '{"metadata":{"annotations":{"enable":"false"}}}' +``` + +### Step 2: Stop Replication + +Stop replication on the replica instance: +```sql +mysql> STOP REPLICA; +``` + +### Step 3: Change Replica to Read-Write + +Change the replica instance to read-write mode: + +```sql +mysql> SET GLOBAL super_read_only = OFF; +mysql> SET GLOBAL read_only = OFF; +``` + +### Step 4: Delete Data from the Replica +Delete the data on the replica: +```sql +mysql> DELETE FROM test.t1 WHERE id = 1; +``` + +### Step 5: Change Replica to Read-Only +Restore the replica to read-only mode: +```sql +mysql> SET GLOBAL super_read_only = ON; +mysql> SET GLOBAL read_only = ON; +``` + +### Step 6: Enable HA +Patch 
the ConfigMap to re-enable HA: +```bash +kubectl patch configmap -n demo example-mysql-cluster-mysql-haconfig --type merge -p '{"metadata":{"annotations":{"enable":"true"}}}' +``` + +### Step 7: Verify Data Deletion +Verify that the data has been deleted: +```sql +mysql> SELECT * FROM test.t1; +Empty set (0.00 sec) +``` + +## Rebuild the Replica + +KubeBlocks provides two approaches for rebuilding a replica: in-place repair and non-in-place repair. + +### In-Place Repair + +Rebuild the replica in-place using the following configuration: +```bash +kubectl apply -f - < SELECT * FROM test.t1; ++----+----------+ +| id | name | ++----+----------+ +| 1 | John Doe | ++----+----------+ +1 row in set (0.01 sec) +``` + +### Non-In-Place Repair + +Rebuild the replica by creating a new instance: + +```bash +kubectl apply -f - < SELECT * FROM test.t1; ++----+----------+ +| id | name | ++----+----------+ +| 1 | John Doe | ++----+----------+ +1 row in set (0.01 sec) +``` + +#### Check All Pods +Run the following command to list all the Pods in the MySQL cluster: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=example-mysql-cluster +``` +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +example-mysql-cluster-mysql-1 4/4 Running 0 13m +example-mysql-cluster-mysql-2 4/4 Running 0 2m14s +``` +At this point, you can see two Pods: 'example-mysql-cluster-mysql-1' and 'example-mysql-cluster-mysql-2'. The original Pod 'example-mysql-cluster-mysql-0' has been deleted. + +To verify the cluster's status, inspect the cluster resource: +```bash +kubectl get cluster example-mysql-cluster -n demo -oyaml +``` +Example Output: +```yaml + offlineInstances: + - example-mysql-cluster-mysql-0 +``` +The 'example-mysql-cluster-mysql-0' instance has been marked as offline. + + +## Summary +- In-Place Repair: Successfully rebuilt the replica and restored the deleted data. +- Non-In-Place Repair: Created a new replica instance and successfully restored the data. 
+ +Both methods effectively recover the replica and ensure data consistency. diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/_category_.yml new file mode 100644 index 00000000..5c4d52e0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/04-operations/_category_.yml @@ -0,0 +1,4 @@ +position: 4 +label: Operations +collapsible: true +collapsed: false \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/01-create-backuprepo.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/01-create-backuprepo.mdx new file mode 100644 index 00000000..3f14c6d5 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/01-create-backuprepo.mdx @@ -0,0 +1,126 @@ +--- +title: Create a Backup Repository for KubeBlocks +description: Learn how to create and configure a BackupRepo for KubeBlocks using an S3 bucket to store backup data. +keywords: [KubeBlocks, Backup, BackupRepo, S3, Kubernetes] +sidebar_position: 1 +sidebar_label: Create BackupRepo +--- + +# Create a BackupRepo for KubeBlocks + +This guide walks you through creating and configuring a BackupRepo in KubeBlocks using an S3 bucket for storing backup data. + +## Prerequisites +- AWS CLI configured with appropriate permissions to create S3 buckets. +- kubectl access to your Kubernetes cluster. +- KubeBlocks installed ([installation guide](../user_docs/overview/install-kubeblocks)) and running. + +## Step 1: Create S3 Bucket + +Use the AWS CLI to create an S3 bucket in your desired region. Replace `` with your target AWS region (e.g., `us-east-1`, `ap-southeast-1`). 
+ +```bash +aws s3api create-bucket --bucket kubeblocks-backup-repo --region --create-bucket-configuration LocationConstraint= +``` + +Example (for us-west-1): +```bash +aws s3api create-bucket \ + --bucket kubeblocks-backup-repo \ + --region us-west-1 \ + --create-bucket-configuration LocationConstraint=us-west-1 +``` + +Example Output: + +```json +{ +"Location": "http://kubeblocks-backup-repo.s3.amazonaws.com/" +} +``` + +Verification: +Confirm the bucket was created by listing its contents (it will be empty initially): + +```bash +aws s3 ls s3://kubeblocks-backup-repo +``` + +## Step 2: Create a Kubernetes Secret for AWS Credentials + +Store your AWS credentials securely in a Kubernetes Secret. Replace `` and `` with your actual AWS credentials: + +```bash +# Create a secret to save the access key +kubectl create secret generic s3-credential-for-backuprepo \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= \ + -n kb-system +``` + +## Step 3: Configure Backup Repository + +A BackupRepo is a custom resource that defines a storage repository for backups. In this step, you'll integrate your S3 bucket with KubeBlocks by creating a BackupRepo resource. + +Apply the following YAML to create the BackupRepo. Replace fields(e.g., bucket name, region) with your specific settings. + +```yaml +kubectl apply -f - < 0/1 Init:0/1 0 6s +restore-preparedata-XXXXX- 1/1 Running 0 12s +restore-preparedata-XXXXX- 0/1 Completed 0 20s +``` +These pods copy backup data to Persistent Volumes (PVCs). + + +2. MySQL Cluster Pods: +```bash +example-mysql-cluster-restored-mysql-0 0/4 Pending 0 0s +example-mysql-cluster-restored-mysql-0 4/4 Running 0 20s +``` +Pods initialize with restored data and start MySQL services. 
+ +## Perform Restoration via Ops API + +Alternatively, use the Ops API to initiate the restoration process: + +### Step 1: Create a Restore Request + +```yaml +kubectl apply -f - < 0/1 Init:0/1 0 6s +restore-preparedata-XXXXX- 1/1 Running 0 12s +restore-preparedata-XXXXX- 0/1 Completed 0 20s +``` +These pods copy backup data to Persistent Volumes (PVCs). + + +2. MySQL Cluster Pods: +```bash +example-mysql-cluster-restored-mysql-0 0/4 Pending 0 0s +example-mysql-cluster-restored-mysql-0 4/4 Running 0 20s +``` +After restoration, MySQL cluster pods initialize with the restored data and start the MySQL service. + +#### Step 2: Verify Cluster Status +Check the status of the restored cluster: +```bash +kubectl get cluster example-mysql-cluster-restored -n demo +``` +Successful Output: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +example-mysql-cluster-restored Delete Running 97s +``` + +## Cleanup +To remove all created resources, delete the MySQL cluster along with its namespace: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete cluster example-mysql-cluster-restored -n demo +kubectl delete ns demo +``` + +## Summary +This guide demonstrated how to restore a MySQL cluster in KubeBlocks using a full backup and continuous binlog backup for Point-In-Time Recovery (PITR). Key steps included: +- Verifying available backups. +- Extracting encrypted system account credentials. +- Creating a new MySQL cluster with restoration configuration. +- Monitoring the restoration process. + +With this approach, you can restore a MySQL cluster to a specific point in time, ensuring minimal data loss and operational continuity. 
+ diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/_category_.yml new file mode 100644 index 00000000..cd4faeaf --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/05-backup-restore/_category_.yml @@ -0,0 +1,4 @@ +position: 5 +label: Backup And Restores +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/06-custom-secret/01-custom-secret.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/06-custom-secret/01-custom-secret.mdx new file mode 100644 index 00000000..4a5cd47f --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/06-custom-secret/01-custom-secret.mdx @@ -0,0 +1,143 @@ +--- +title: Create a MySQL Cluster with a Custom Root Password on KubeBlocks +description: Learn how to deploy a MySQL cluster on KubeBlocks with a custom root password securely configured using Kubernetes Secrets. +keywords: [MySQL, KubeBlocks, Custom Password, Kubernetes, Secrets] +sidebar_position: 1 +sidebar_label: Custom Password +--- + +# Create MySQL Cluster With Custom Password on KubeBlocks + +This guide demonstrates how to deploy a MySQL cluster in KubeBlocks with a custom root password stored in a Kubernetes Secret. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. 
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +``` +Expected Output: +```bash +namespace/demo created +``` + +## Deploying the MySQL Semi-Synchronous Cluster + +KubeBlocks uses a declarative approach for managing MySQL clusters. Below is an example configuration for deploying a MySQL cluster with 2 nodes (1 primary, 1 replicas) in semi-synchronous mode and a custom root password. + +### Step 1: Create a Secret for the Root Account + +The custom root password is stored in a Kubernetes Secret. Create the Secret by applying the following YAML: + +```yaml +kubectl apply -f - < STATUS; +-------------- + +SSL: Cipher in use is TLS_AES_256_GCM_SHA384 +``` +If the SSL field displays a cipher, the connection is successfully encrypted using TLS. + +## Cleanup +To remove all resources created in this tutorial, run the following commands: +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +- Deploy a MySQL cluster using KubeBlocks and enable TLS encryption for secure communication between the MySQL client and server. +- Establish a secure MySQL connection with TLS. +- Verify the secure connection using the MySQL shell. + +TLS encryption ensures secure communication by encrypting data in transit and protecting sensitive information. By following these steps, you can deploy a secure MySQL cluster on Kubernetes with ease using KubeBlocks. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/02-tls-custom-cert.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/02-tls-custom-cert.mdx new file mode 100644 index 00000000..c9323739 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/02-tls-custom-cert.mdx @@ -0,0 +1,192 @@ +--- +title: Deploy a MySQL Cluster with User-Provided TLS on KubeBlocks +description: Learn how to deploy a MySQL cluster on KubeBlocks with user-provided TLS certificates for secure communication. This guide covers generating certificates, deploying the cluster, and verifying secure connections. +keywords: [KubeBlocks, MySQL, Kubernetes, TLS, Secure Communication, User-Provided Certificates] +sidebar_position: 2 +sidebar_label: MySQL Cluster with User-Provided TLS +--- + +# Deploy a MySQL Cluster with User-Provided TLS on KubeBlocks + +This guide explains how to deploy a MySQL cluster with **user-provided TLS certificates** using KubeBlocks. By supplying your own certificates, you have full control over the security configuration for encrypted communication between the MySQL client and server. This guide covers generating certificates, deploying the cluster, and verifying the secure connection. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Generating Certificates + +To enable TLS encryption, you will need to provide a Certificate Authority (CA), a server certificate, and a private key. 
Follow these steps to generate these using OpenSSL: + +1. Generate the Root Certificate (CA) +```bash +# Create the CA private key (password optional) +openssl genrsa -aes256 -out ca-key.pem 4096 + +# Generate a self-signed root certificate (valid for 10 years) +openssl req -x509 -new -nodes -key ca-key.pem -sha256 -days 3650 -out ca.pem +# Enter the required information (e.g., Common Name can be "MySQL Root CA") +``` + +2. Generate the Server Certificate & Key +```bash +# Generate the server private key +openssl genrsa -out server-key.pem 4096 + +# Create a Certificate Signing Request (CSR) +openssl req -new -key server-key.pem -out server-req.pem +# Enter server identification details, such as: +# Common Name (CN) = Server domain name or IP (must match the MySQL server address!) + +# Sign the server certificate with the CA (valid for 10 years) +openssl x509 -req -in server-req.pem -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out server-cert.pem -days 3650 -sha256 +``` + +3. Verify the Certificates +Verify that the server certificate is valid and signed by the CA: +```bash +# Verify the server certificate +openssl verify -CAfile ca.pem server-cert.pem +``` +Expected Output: +```bash +server-cert.pem: OK +``` + +## Create Kubernetes Secrets +Store the generated certificates and keys in a Kubernetes Secret to make them accessible to your MySQL cluster: +```bash +kubectl create secret generic mysql-tls-secret \ + --namespace=demo \ + --from-file=ca.crt=ca.pem \ + --from-file=tls.crt=server-cert.pem \ + --from-file=tls.key=server-key.pem \ + --type=kubernetes.io/tls +``` +This secret contains the CA, server certificate, and private key required to enable mTLS on the MySQL cluster. + +## Deploying the MySQL Semi-Synchronous Cluster + +KubeBlocks uses a declarative approach for managing MySQL clusters. 
Below is an example configuration for deploying a MySQL cluster with 2 nodes (1 primary, 1 replicas) in semi-synchronous mode with user-provided TLS certificates: +```yaml +kubectl apply -f - < STATUS; +-------------- + +SSL: Cipher in use is TLS_AES_256_GCM_SHA384 +``` +If you see SSL information in the output, the connection is successfully encrypted using TLS. + + +## Cleanup +Remove all resources after testing: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete secret mysql-tls-secret -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +- Generate a self-signed CA and server certificates using OpenSSL. +- Store the certificates in a Kubernetes Secret. +- Deploy a MySQL cluster with TLS encryption using KubeBlocks. +- Connect to the MySQL cluster securely using TLS and verify the connection. + +Using TLS ensures secure communication between the MySQL client and server, protecting sensitive data in transit. By following these steps, you can easily set up and manage a secure MySQL cluster on Kubernetes using KubeBlocks. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/03-mtls.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/03-mtls.mdx new file mode 100644 index 00000000..92222cbf --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/03-mtls.mdx @@ -0,0 +1,261 @@ +--- +title: Deploy a MySQL Cluster with mTLS on KubeBlocks +description: Learn how to configure mutual TLS (mTLS) encryption for a MySQL cluster on KubeBlocks. This guide walks through certificate generation, cluster deployment, mTLS user setup, and secure connection verification. 
+keywords: [KubeBlocks, MySQL, Kubernetes, mTLS, Mutual TLS, Secure Communication] +sidebar_position: 3 +sidebar_label: MySQL Cluster with mTLS +--- + +# Create a MySQL Cluster With mTLS on KubeBlocks + +This guide explains how to configure a MySQL cluster with **mutual TLS (mTLS)** encryption using KubeBlocks. mTLS ensures both the server and client authenticate each other during a connection, providing enhanced security for your database infrastructure. This guide covers certificate generation, cluster deployment, user configuration for mTLS, and secure connection verification. + + +## What is mTLS? +Mutual TLS (mTLS) is an enhanced security protocol that ensures both the server and the client authenticate each other during a connection. Unlike traditional TLS, where only the client verifies the server's identity, mTLS adds an extra layer of security by requiring both sides to present valid certificates issued by a trusted Certificate Authority (CA). + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Generating Certificates + +To enable TLS encryption, you will need to provide a Certificate Authority (CA), a server certificate, and a private key. Follow these steps to generate these using OpenSSL: + +1. 
Generate the Root Certificate (CA) +```bash +# Create the CA private key (password optional) +openssl genrsa -aes256 -out ca-key.pem 4096 + +# Generate a self-signed root certificate (valid for 10 years) +openssl req -x509 -new -nodes -key ca-key.pem -sha256 -days 3650 -out ca.pem +# Enter the required information (e.g., Common Name can be "MySQL Root CA") +``` + +2. Generate the Server Certificate & Key +```bash +# Generate the server private key +openssl genrsa -out server-key.pem 4096 + +# Create a Certificate Signing Request (CSR) +openssl req -new -key server-key.pem -out server-req.pem +# Enter server identification details, such as: +# Common Name (CN) = Server domain name or IP (must match the MySQL server address!) + +# Sign the server certificate with the CA (valid for 10 years) +openssl x509 -req -in server-req.pem -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out server-cert.pem -days 3650 -sha256 +``` + +3. Generate the Client Certificate & Key +```bash +# Generate the client private key +openssl genrsa -out client-key.pem 4096 + +# Create a Certificate Signing Request (CSR) +openssl req -new -key client-key.pem -out client-req.pem +# Enter client identification details, such as: +# Common Name (CN) = Client username (e.g., "mysql_client_1") + +# Sign the client certificate with the CA (valid for 1 year) +openssl x509 -req -in client-req.pem -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out client-cert.pem -days 365 -sha256 +``` + +4. 
Verify the Certificates +Verify that the server certificate is valid and signed by the CA: +```bash +# Verify the server certificate +openssl verify -CAfile ca.pem server-cert.pem +``` +Expected Output: +```bash +server-cert.pem: OK +``` + +```bash +# Verify the client certificate +openssl verify -CAfile ca.pem client-cert.pem +``` +Expected Output: +```bash +client-cert.pem: OK +``` + +## Create Kubernetes Secrets +Store the generated certificates and keys in a Kubernetes Secret to make them accessible to your MySQL cluster. +```bash +kubectl create secret generic mysql-tls-secret \ + --namespace=demo \ + --from-file=ca.crt=ca.pem \ + --from-file=tls.crt=server-cert.pem \ + --from-file=tls.key=server-key.pem \ + --type=kubernetes.io/tls +``` +This secret contains the CA, server certificate, and private key required to enable mTLS on the MySQL cluster. + +## Deploying the MySQL Semi-Synchronous Cluster + +KubeBlocks uses a declarative approach for managing MySQL clusters. Below is an example configuration for deploying a MySQL cluster with 2 nodes (1 primary, 1 replicas) in semi-synchronous mode with user-provided TLS certificates: + +```yaml +kubectl apply -f - < CREATE USER 'mtls_user'@'%' IDENTIFIED BY 'kni676X2W1' REQUIRE X509; +Query OK, 0 rows affected (0.01 sec) + +mysql> GRANT ALL PRIVILEGES ON *.* TO 'mtls_user'@'%'; +Query OK, 0 rows affected (0.01 sec) + +mysql> FLUSH PRIVILEGES; +Query OK, 0 rows affected (0.01 sec) +``` + +## Connect to MySQL Cluster with mTLS + +use the `kubectl port-forward` command to map port 3306 of the primary replica of the MySQL Cluster to port 3306 on your local machine: + +```bash +kubectl port-forward svc/mysql-cluster-mysql 3306:3306 -n default +Forwarding from 127.0.0.1:3306 -> 3306 +Forwarding from [::1]:3306 -> 3306 +``` + +Then, open another shell and use the mysql command-line tool to connect to the local port 3306. 
+ +If connecting without client certificates, you will see an error: +```bash +mysql -h 127.0.0.1 -umtls_user -pkni676X2W1 --ssl-mode=REQUIRED +``` +Expected Output: +```bash +ERROR 1045 (28000): Access denied for user 'mtls_user'@'127.0.0.1' (using password: YES) +``` + +To connect successfully, provide the client certificate and key: +```bash +mysql -h 127.0.0.1 -umtls_user -pkni676X2W1 --ssl-mode=REQUIRED --ssl-ca=/path/to/ca.pem --ssl-cert=/path/to/client-cert.pem --ssl-key=/path/to/client-key.pem +``` + +Verify TLS connection status in MySQL shell: +```sql +mysql> STATUS; +-------------- + +SSL: Cipher in use is TLS_AES_256_GCM_SHA384 +``` + +## Cleanup +Remove all resources created during this tutorial: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete secret mysql-tls-secret -n demo +kubectl delete ns demo +``` + +## Summary + +In this guide, you learned how to: +1. Generate a self-signed CA, server, and client certificates using OpenSSL. +2. Deploy a MySQL cluster with mTLS enabled on KubeBlocks. +3. Configure an mTLS user and verify secure connections. + +mTLS provides an additional layer of trust and security by ensuring both client and server authentication. By following this guide, you can securely deploy and manage MySQL clusters with mTLS on Kubernetes using KubeBlocks. 
diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/_category_.yml new file mode 100644 index 00000000..fd9b300a --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/07-tls/_category_.yml @@ -0,0 +1,4 @@ +position: 7 +label: TLS +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..7884eaa7 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,250 @@ +--- +title: Observability for MySQL Clusters with the Prometheus Operator +description: Learn how to set up observability for MySQL Clusters in KubeBlocks using the Prometheus Operator. Configure monitoring and visualize metrics with Grafana. +keywords: [KubeBlocks, MySQL, Prometheus, Grafana, Observability, Metrics] +sidebar_position: 2 +sidebar_label: Observability for MySQL Clusters +--- + +# Observability for MySQL Clusters with the Prometheus Operator + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. 
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Installing the Prometheus Operator + +If the Prometheus Operator is not already installed, you can install it using Helm: + +```bash +kubectl create namespace monitoring +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack -n monitoring --create-namespace +``` + +Or you can follow the steps in [How to install the Prometheus Operator](../docs/install-prometheus.md) to install the Prometheus Operator. + +Check the status of deployed pods: +```bash +kubectl get pods -n monitoring +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + + +## Deploy a MySQL Semi-Synchronous Cluster + +KubeBlocks uses a declarative approach for managing MySQL clusters. Below is an example configuration for deploying a MySQL cluster with 2 nodes (1 primary, 1 replicas) in semi-synchronous mode. 
+ +Apply the following YAML configuration to deploy the cluster: + +```yaml +kubectl apply -f - < 91m v1.31.5-eks-5d632ec +ip-10-0-1-183.ap-southeast-1.compute.internal Ready 71m v1.31.5-eks-5d632ec +ip-10-0-1-217.ap-southeast-1.compute.internal Ready 2m13s v1.31.5-eks-5d632ec +ip-10-0-2-186.ap-southeast-1.compute.internal Ready 91m v1.31.5-eks-5d632ec +ip-10-0-2-252.ap-southeast-1.compute.internal Ready 71m v1.31.5-eks-5d632ec +ip-10-0-2-71.ap-southeast-1.compute.internal Ready 2m24s v1.31.5-eks-5d632ec +ip-10-0-3-143.ap-southeast-1.compute.internal Ready 91m v1.31.5-eks-5d632ec +ip-10-0-3-205.ap-southeast-1.compute.internal Ready 36s v1.31.5-eks-5d632ec +ip-10-0-3-238.ap-southeast-1.compute.internal Ready 91m v1.31.5-eks-5d632ec +``` +From the output, we can see there are three nodes in each AZ: ap-southeast-1a, ap-southeast-1b, and ap-southeast-1c. + + +## Deploy a MySQL Cluster Across Different AZs + +### Creating a MySQL Cluster +To deploy a 3-node semi-synchronous MySQL cluster (1 primary, 2 secondary) across different AZs, use the following YAML configuration: +```yaml +kubectl apply -f - < +mysql 500m / 500m 512Mi / 512Mi data:20Gi +``` +**Observation**: +- The default replica has 0.5 CPU and 0.5Gi memory. +- The custom replica has 1 CPU and 1Gi memory. 
+ +## Expose the custom Pod as a Service +To expose the custom Pod via a separate Service, use the following configuration: +```yaml +kubectl apply -f - < 3306/TCP 12m +example-mysql-cluster-mysql ClusterIP 172.20.11.166 3306/TCP 12m +example-mysql-cluster-mysql-headless ClusterIP None 3306/TCP,3601/TCP,9104/TCP,3501/TCP,3502/TCP,9901/TCP 12m +``` + +### Accessing Your Custom Pod via the Service +Retrieve the root credentials: +```bash +kubectl get secrets -n demo example-semisync-mysql-mysql-account-root -o jsonpath='{.data.username}' | base64 -d + +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +``` +Expected Output: +```bash +root + +uk263gR24s +``` + +Connect to the custom Pod from inside one of the MySQL containers: +```bash +kubectl exec -it example-mysql-cluster-mysql-0 -n demo -- mysql -hexample-mysql-cluster-custom-pod -uroot -puk263gR24s +``` +This custom Pod is provisioned with additional resources, making it ideal for running complex queries or analytical workloads. + +## Cleanup +To remove all created resources, delete the MySQL cluster along with its namespace: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete ns demo +``` + + +## Conclusion +By customizing Pod resource configurations and labels through KubeBlocks, you can build a flexible and resource-efficient MySQL environment. Whether you need a powerful primary instance or specialized report-generation replicas, KubeBlocks Operator enables you to fine-tune each Pod’s CPU, memory, and storage according to workload demands. 
diff --git a/docs/en/release-1_0_1/kubeblocks-for-mysql/09-advanced-pod-management/03-parallel-pod-management-concurrency.mdx b/docs/en/release-1_0_1/kubeblocks-for-mysql/09-advanced-pod-management/03-parallel-pod-management-concurrency.mdx new file mode 100644 index 00000000..4945c68c --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-mysql/09-advanced-pod-management/03-parallel-pod-management-concurrency.mdx @@ -0,0 +1,291 @@ +--- +title: Configuring MySQL Cluster with Controlled Pod Creation, Scaling, and Deletion Parallelism in KubeBlocks +description: Learn how to configure MySQL clusters in KubeBlocks with controlled pod parallelism for creation, scaling, and deletion using the `parallelPodManagementConcurrency` parameter. +keywords: [KubeBlocks, MySQL, Pod Management, Parallelism, Kubernetes] +sidebar_position: 3 +sidebar_label: Pod Management Parallelism +--- + +# Configuring a MySQL Cluster with Controlled Pod Creation, Scaling, and Deletion Parallelism in KubeBlocks + +This guide demonstrates how to control pod creation, scaling, and deletion parallelism for MySQL clusters in KubeBlocks using the `parallelPodManagementConcurrency` parameter. By defining the maximum number of pods that can be managed in parallel, it allows users to balance operational speed and system stability. Unlike the `podManagementPolicy` in StatefulSet, which only provides two fixed options (`OrderedReady` or `Parallel`), `parallelPodManagementConcurrency` offers more flexibility, making it ideal for both resource-sensitive and production environments. + + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. 
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Deploy a MySQL Semi-Synchronous Cluster + +Deploy a 2-node semi-sync MySQL cluster (1 primary, 1 secondary) and set the `parallelPodManagementConcurrency` parameter to 1 to enforce sequential pod creation. + +```yaml +kubectl apply -f - < +Example Output: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-postgresql kb-system 1 2025-05-21 deployed postgresql-1.0.0 +``` + + +If the add-on isn't enabled, choose an installation method: + + + + + ```bash + # Add Helm repo + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # For users in Mainland China, if GitHub is inaccessible or slow, use this alternative repo: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # Update helm repo + helm repo update + # Search available Add-on versions + helm search repo kubeblocks/postgresql --versions + # Install your desired version (replace with your chosen version) + helm upgrade -i kb-addon-postgresql kubeblocks-addons/postgresql --version -n kb-system + ``` + + + + + ```bash + # Add an index (kubeblocks is added by default) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # Update the index + kbcli addon index update kubeblocks + # Update all indexes + kbcli addon index update --all + ``` + + To search and install an addon: + + ```bash + # Search Add-on + kbcli addon search postgresql + # Install Add-on with your desired version (replace with your chosen version) + kbcli addon install postgresql --version + ``` + **Example Output:** + ```bash + ADDON VERSION INDEX + postgresql 0.9.1 kubeblocks + postgresql 0.9.2 kubeblocks + postgresql 0.9.3 kubeblocks + postgresql 1.0.0 kubeblocks + ``` + To enable or disable an addon: + + ```bash + # Enable Add-on + kbcli addon enable postgresql + # 
Disable Add-on + kbcli addon disable postgresql + ``` + + + + +:::note +**Version Compatibility** + +Always verify that the PostgreSQL Add-on version matches your KubeBlocks major version to avoid compatibility issues. + +::: + +### Verify Supported PostgreSQL Versions + +**List available PostgreSQL versions:** + +```bash +kubectl get cmpv postgresql +``` +
+Example Output +```text +NAME VERSIONS STATUS AGE +postgresql 16.4.0,15.7.0,14.8.0,14.7.2,12.15.0,12.14.1,12.14.0 Available 33d +``` +
+ +**Check version compatibility for ComponentDefinitions** + +**Step 1.** Get the list of `ComponentDefinition` associated with a given `ComponentVersion` + +```bash +kubectl get cmpv postgresql -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n' +``` + +
+Example Output +```text +postgresql-12-1.0.0 +postgresql-14-1.0.0 +postgresql-15-1.0.0 +postgresql-16-1.0.0 +``` +
 + +**Step 2.** Get the list of releases (service versions) compatible with a given `ComponentDefinition` + +```bash +kubectl get cmpv postgresql -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("postgresql-14"))) | .releases[]' +``` + +This returns versions compatible with `ComponentDefinition` named `postgresql-14`: + 
+Example Output +```text +14.7.2 +14.8.0 +``` +
+ +### Storage Configuration + +PostgreSQL requires persistent storage. Verify available options: + +```bash +kubectl get storageclass +``` + +Recommended storage characteristics: +- Minimum 20Gi capacity +- ReadWriteOnce access mode +- Supports volume expansion +- Appropriate performance for workload + +## Deploy a PostgreSQL Cluster + +Deploy a basic PostgreSQL cluster with default settings: + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/postgresql/cluster.yaml +``` + +This creates: +- A 2-replica PostgreSQL cluster +- Default resource allocations (0.5 CPU, 0.5Gi memory) +- 20Gi persistent storage +- Automatic primary-replica configuration + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + # Specifies the behavior when a Cluster is deleted. + # Valid options are: [DoNotTerminate, Delete, WipeOut] (`Halt` is deprecated since KB 0.9) + # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. + # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. + # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. + terminationPolicy: Delete + # Specifies the name of the ClusterDefinition to use when creating a Cluster. + # Note: DO NOT UPDATE THIS FIELD + # The value must be `postgresql` to create a PostgreSQL Cluster + clusterDef: postgresql + # Specifies the name of the ClusterTopology to be used when creating the + # Cluster. 
+ # Valid options are: [replication] + topology: replication + # Specifies a list of ClusterComponentSpec objects used to define the + # individual Components that make up a Cluster. + # This field allows for detailed configuration of each Component within the Cluster + componentSpecs: + - name: postgresql + # ServiceVersion specifies the version of the Service expected to be + # provisioned by this Component. + # Valid options are: [12.14.0,12.14.1,12.15.0,12.22.0,14.7.2,14.8.0,14.18.0,15.7.0,15.13.0,16.4.0,16.9.0,17.5.0] + serviceVersion: "14.7.2" + # Determines whether metrics exporter information is annotated on the + # Component's headless Service. + # Valid options are [true, false] + disableExporter: false + # Update `replicas` to your need. + replicas: 2 + # Specifies the resources required by the Component. + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + # Specifies a list of PersistentVolumeClaim templates that define the storage + # requirements for the Component. + volumeClaimTemplates: + # Refers to the name of a volumeMount defined in + # `componentDefinition.spec.runtime.containers[*].volumeMounts + - name: data + spec: + # The name of the StorageClass required by the claim. + # If not specified, the StorageClass annotated with + # `storageclass.kubernetes.io/is-default-class=true` will be used by default + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # Set the storage size as needed + storage: 20Gi +``` + +For more API fields and descriptions, refer to the [API Reference](../user_docs/references/api-reference/cluster). 
+ +### Create a Version-Specific PostgreSQL Cluster + +To create a cluster with a specific version, configure `spec.componentSpecs.serviceVersion` (major.minor version) fields before applying it: + + + + ```yaml + componentSpecs: + - name: postgresql + serviceVersion: 12.15.0 # Valid options: [12.15.0,12.14.1,12.14.0] + ``` + + + + ```yaml + componentSpecs: + - name: postgresql + serviceVersion: 14.7.2 # Valid options: [14.18.0,14.8.0,14.7.2] + ``` + + + + ```yaml + componentSpecs: + - name: postgresql + serviceVersion: 15.7.0 # Valid options: [15.13.0,15.7.0] + ``` + + + + ```yaml + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 # Valid options: [16.9.0,16.4.0] + ``` + + + + ```yaml + componentSpecs: + - name: postgresql + serviceVersion: 17.5.0 # Valid options: [17.5.0] + ``` + + + +List available `ComponentDefinition` and `ComponentVersion`: + +```bash +kubectl get cmpd -l app.kubernetes.io/name=postgresql +``` + +
+Example Output +```bash +NAME SERVICE SERVICE-VERSION STATUS AGE +postgresql-12-1.0.0 postgresql 12.15.0 Available 22d +postgresql-14-1.0.0 postgresql 14.8.0 Available 22d +postgresql-15-1.0.0 postgresql 15.7.0 Available 22d +postgresql-16-1.0.0 postgresql 16.4.0 Available 22d +postgresql-17-1.0.0 postgresql 17.5.0 Available 22d +``` +
+ +```bash +kubectl get cmpv -l app.kubernetes.io/name=postgresql +``` + +
+Example Output + +```bash +NAME VERSIONS STATUS AGE +postgresql 16.4.0,15.7.0,14.8.0,14.7.2,12.15.0,12.14.1,12.14.0 Available 22d +``` + +
+ +## Verify Cluster Status + +When deploying a PostgreSQL cluster with 2 replicas, KubeBlocks automatically configures: +- Primary replica (read/write operations) +- Secondary replica (read-only operations) + +Confirm successful deployment by checking: + +1. Cluster phase is `Running` +2. All pods are operational +3. Replicas have correct roles + +Check status using either method: + + + + +```bash +kubectl get cluster pg-cluster -n demo +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +pg-cluster postgresql Delete Running 107s + +kubectl get pods -l app.kubernetes.io/instance=pg-cluster -n demo +NAME READY STATUS RESTARTS AGE +pg-cluster-postgresql-0 4/4 Running 0 31m +pg-cluster-postgresql-1 4/4 Running 0 31m +``` + + + + + +With `kbcli` installed, you can view comprehensive cluster information: + +```bash +kbcli cluster describe pg-cluster -n demo +Name: pg-cluster Created Time: May 15,2025 14:23 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo postgresql replication Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL +postgresql pg-cluster-postgresql-postgresql.demo.svc.cluster.local:5432 + pg-cluster-postgresql-postgresql.demo.svc.cluster.local:6432 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +postgresql 14.7.2 pg-cluster-postgresql-0 primary Running zone-1a ip-x-y-z Dec 16,2024 08:37 UTC+0800 +postgresql 14.7.2 pg-cluster-postgresql-1 secondary Running zone-1b ip-x-y-z Dec 16,2024 08:37 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +postgresql 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +postgresql postgresql-14-1.0.0 docker.io/apecloud/spilo:14.7.2-pgvector-v0.6.1 + docker.io/bitnami/pgbouncer:1.19.0 + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +Show cluster events: kbcli cluster 
list-events -n demo pg-cluster +``` + + + + +## Access the PostgreSQL Cluster + +KubeBlocks automatically provisions: +1. Credentials stored in Secret `pg-cluster-postgresql-account-postgres` +2. ClusterIP Service `pg-cluster-postgresql-postgresql` + +### Retrieve Credentials +```bash +# Get username +NAME=$(kubectl get secret -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 --decode) + +# Get password +PASSWD=$(kubectl get secret -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 --decode) +``` + +### Connection Methods + + + + +Connect directly to a pod: +```bash +kubectl exec -ti -n demo pg-cluster-postgresql-0 -- \ + env PGUSER=${NAME} PGPASSWORD=${PASSWD} \ + psql -h pg-cluster-postgresql-postgresql +``` + + + + +1. Forward service port: + ```bash + kubectl port-forward svc/pg-cluster-postgresql-postgresql 5432:5432 -n demo + ``` + +2. Connect via localhost: + ```bash + psql -h 127.0.0.1 -U${NAME} -W + ``` + + + +:::note +**Production Considerations** + +For production environments, avoid using `kubectl exec` and `port-forward`. 
Instead implement: +- LoadBalancer or NodePort Services for external access +- Network policies to restrict access +- TLS encryption for secure connections +- Connection pooling for better performance +::: + +## Stop the PostgreSQL Cluster + +Stopping a cluster temporarily suspends operations while preserving all data and configuration: + +**Key Effects:** +- Compute resources (Pods) are released +- Persistent storage (PVCs) remains intact +- Service definitions are maintained +- Cluster configuration is preserved +- Operational costs are reduced + + + + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/postgresql/stop.yaml +``` + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-stop + namespace: demo +spec: + clusterName: pg-cluster + type: Stop +``` + + + +Alternatively, stop by setting `spec.componentSpecs.stop` to true: + +```bash +kubectl patch cluster pg-cluster -n demo --type='json' -p='[ +{ + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true +} +]' +``` + +```yaml +spec: + componentSpecs: + - name: postgresql + stop: true # Set to stop component + replicas: 2 +``` + + + +## Start the PostgreSQL Cluster + +Restarting a stopped cluster resumes operations with all data and configuration intact. 
+ +**Key Effects:** +- Compute resources (Pods) are recreated +- Services become available again +- Cluster returns to previous state + + + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/postgresql/start.yaml +``` + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-start + namespace: demo +spec: + clusterName: pg-cluster + type: Start +``` + + + +Restart by setting `spec.componentSpecs.stop` to false: + +```bash +kubectl patch cluster pg-cluster -n demo --type='json' -p='[ +{ + "op": "remove", + "path": "/spec/componentSpecs/0/stop" +} +]' +``` + +```yaml +spec: + componentSpecs: + - name: postgresql + stop: false # Set to start component + replicas: 2 +``` + + + +## Delete PostgreSQL Cluster + +Choose carefully based on your data retention needs: + +| Policy | Resources Removed | Data Removed | Recommended For | +|-----------------|-------------------|--------------|-----------------| +| DoNotTerminate | None | None | Critical production clusters | +| Delete | All resources | PVCs deleted | Non-critical environments | +| WipeOut | All resources | Everything* | Test environments only | + +*Includes snapshots and backups in external storage + +**Pre-Deletion Checklist:** +1. Verify no applications are using the cluster +2. Ensure required backups exist +3. Confirm proper terminationPolicy is set +4. 
Check for dependent resources + +For test environments, use this complete cleanup: + +```bash +kubectl patch cluster pg-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster pg-cluster -n demo +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/01-stop-start-restart.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..5f9e1a1f --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,283 @@ +--- +title: PostgreSQL Cluster Lifecycle Management (Stop, Start, Restart) +description: Learn how to manage PostgreSQL cluster states in KubeBlocks including stopping, starting, and restarting operations to optimize resources. +keywords: [KubeBlocks, PostgreSQL, Cluster Management, Stop, Start, Restart] +sidebar_position: 1 +sidebar_label: Lifecycle Management +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# PostgreSQL Cluster Lifecycle Management + +This guide demonstrates how to manage a PostgreSQL cluster's operational state in **KubeBlocks**, including: + +- Stopping the cluster to conserve resources +- Starting a stopped cluster +- Restarting cluster components + +These operations help optimize resource usage and reduce operational costs in Kubernetes environments. 
+ +Lifecycle management operations in KubeBlocks: + +| Operation | Effect | Use Case | +|-----------|--------|----------| +| Stop | Suspends cluster, retains storage | Cost savings, maintenance | +| Start | Resumes cluster operation | Restore service after pause | +| Restart | Recreates pods for component | Configuration changes, troubleshooting | + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a PostgreSQL Cluster + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## Cluster Lifecycle Operations + +### Stopping the Cluster + +Stopping a PostgreSQL cluster in KubeBlocks will: + +1. Terminates all running pods +2. Retains persistent storage (PVCs) +3. Maintains cluster configuration + +This operation is ideal for: +- Temporary cost savings +- Maintenance windows +- Development environment pauses + + + + + +Option 1: OpsRequest API + +Create a Stop operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-cluster-stop-ops + namespace: demo +spec: + clusterName: pg-cluster + type: Stop +``` + + + + +Option 2: Cluster API Patch + +Modify the cluster spec directly by patching the stop field: + +```bash +kubectl patch cluster pg-cluster -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + } +]' +``` + + + + + +### Verifying Cluster Stop + +To confirm a successful stop operation: + +1. Check cluster status transition: + ```bash + kubectl get cluster pg-cluster -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + pg-cluster postgresql Delete Stopping 6m3s + pg-cluster postgresql Delete Stopped 6m55s + ``` + +2. 
Verify no running pods: + ```bash + kubectl get pods -n demo + ``` + Example Output: + ```bash + No resources found in demo namespace. + ``` + +3. Confirm persistent volumes remain: + ```bash + kubectl get pvc -n demo + ``` + Example Output: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + data-pg-cluster-postgresql-0 Bound pvc-dcfb1ebc-2773-4edd-9898-e11da76062c4 20Gi RWO standard 19m + data-pg-cluster-postgresql-1 Bound pvc-36366e01-0178-43fa-b1a0-4168b057dd10 20Gi RWO standard 19m + ``` + +### Starting the Cluster + +Starting a stopped PostgreSQL cluster: +1. Recreates all pods +2. Reattaches persistent storage +3. Restores service endpoints + +Expected behavior: +- Cluster returns to previous state +- No data loss occurs +- Services resume automatically + + + + +Initiate a Start operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-cluster-start-ops + namespace: demo +spec: + # Specifies the name of the Cluster resource that this operation is targeting. + clusterName: pg-cluster + type: Start +``` + + + + + +Modify the cluster spec to resume operation: +1. Set stop: false, or +2. Remove the stop field entirely + + ```bash + kubectl patch cluster pg-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } + ]' + ``` + + + + +### Verifying Cluster Start + +To confirm a successful start operation: + +1. Check cluster status transition: + ```bash + kubectl get cluster pg-cluster -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + pg-cluster postgresql Delete Updating 22m + pg-cluster postgresql Delete Running 22m + ``` + +2. Verify pod recreation: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster + ``` + Example Output: + ```bash + NAME READY STATUS RESTARTS AGE + pg-cluster-postgresql-0 1/1 Running 0 2m + pg-cluster-postgresql-1 1/1 Running 0 1m + ``` + +3. 
Check service endpoints: + ```bash + kubectl get endpoints pg-cluster-postgresql-postgresql -n demo + ``` + +### Restarting Cluster + +Restart operations provide: +- Pod recreation without full cluster stop +- Component-level granularity +- Minimal service disruption + +Use cases: +- Configuration changes requiring restart +- Resource refresh +- Troubleshooting + +**Using OpsRequest API** + +Target a specific component for restart: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-cluster-restart-ops + namespace: demo +spec: + clusterName: pg-cluster + type: Restart + restart: + - componentName: postgresql +``` + +**Verifying Restart Completion** + +To verify a successful component restart: + +1. Track OpsRequest progress: + ```bash + kubectl get opsrequest pg-cluster-restart-ops -n demo -w + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-cluster-restart-ops Restart pg-cluster Running 0/2 10s + pg-cluster-restart-ops Restart pg-cluster Running 1/2 65s + pg-cluster-restart-ops Restart pg-cluster Running 2/2 2m5s + pg-cluster-restart-ops Restart pg-cluster Succeed 2/2 2m5s + ``` + +2. Check pod status: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster + ``` + Note: Pods will show new creation timestamps after restart + +3. Verify component health: + ```bash + kbcli cluster describe pg-cluster -n demo + ``` + +Once the operation is complete, the cluster will return to the Running state. + +## Summary +In this guide, you learned how to: +1. Stop a PostgreSQL cluster to suspend operations while retaining persistent storage. +2. Start a stopped cluster to bring it back online. +3. Restart specific cluster components to recreate their Pods without stopping the entire cluster. + +By managing the lifecycle of your PostgreSQL cluster, you can optimize resource utilization, reduce costs, and maintain flexibility in your Kubernetes environment. 
KubeBlocks provides a seamless way to perform these operations, ensuring high availability and minimal disruption. diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/02-vertical-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..fc407f42 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,189 @@ +--- +title: Vertical Scaling in a PostgreSQL Cluster +description: Learn how to perform vertical scaling in a PostgreSQL cluster managed by KubeBlocks to optimize resource utilization and improve performance. +keywords: [KubeBlocks, PostgreSQL, Vertical Scaling, Kubernetes, Resources] +sidebar_position: 2 +sidebar_label: Vertical Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Vertical Scaling for PostgreSQL Clusters with KubeBlocks + +This guide demonstrates how to vertically scale a PostgreSQL cluster managed by KubeBlocks by adjusting compute resources (CPU and memory) while maintaining the same number of replicas. + +Vertical scaling modifies compute resources (CPU and memory) for PostgreSQL instances while maintaining replica count. Key characteristics: + +- **Non-disruptive**: When properly configured, maintains availability during scaling +- **Granular**: Adjust CPU, memory, or both independently +- **Reversible**: Scale up or down as needed + +KubeBlocks orchestrates scaling with minimal impact: +1. Secondary replicas update first +2. Primary updates last after secondaries are healthy +3. 
Cluster status transitions from `Updating` to `Running` + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a PostgreSQL Cluster + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## Vertical Scale + +**Expected Workflow**: + +1. Secondary replicas are updated first (one at a time) +1. Primary is updated last after secondary replicas are healthy +1. Cluster status transitions from `Updating` to `Running` + + + + Option 1: Using VerticalScaling OpsRequest + + Apply the following YAML to scale up the resources for the postgresql component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-vscale-ops + namespace: demo + spec: + clusterName: pg-cluster + type: VerticalScaling + verticalScaling: + - componentName: postgresql + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + What Happens During Vertical Scaling? + - Secondary Pods are recreated first to ensure the primary Pod remains available. + - Once all secondary Pods are updated, the primary Pod is restarted with the new resource configuration. 
+ + + You can check the progress of the scaling operation with the following command: + + ```bash + kubectl -n demo get ops pg-cluster-vscale-ops -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-cluster-vscale-ops VerticalScaling pg-cluster Running 0/2 52s + pg-cluster-vscale-ops VerticalScaling pg-cluster Running 1/2 64s + pg-cluster-vscale-ops VerticalScaling pg-cluster Running 2/2 2m6s + pg-cluster-vscale-ops VerticalScaling pg-cluster Running 2/2 2m6s + pg-cluster-vscale-ops VerticalScaling pg-cluster Succeed 2/2 2m6s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update `spec.componentSpecs.resources` field to the desired resources for vertical scale. + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + disableExporter: true + replicas: 2 + resources: + requests: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + limits: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. 
+ volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + + + +## Best Practices & Considerations + +**Planning:** +- Scale during maintenance windows or low-traffic periods +- Verify Kubernetes cluster has sufficient resources +- Check for any ongoing operations before starting + +**Execution:** +- Maintain balanced CPU-to-Memory ratios +- Set identical requests/limits for guaranteed QoS + +**Post-Scaling:** +- Monitor resource utilization and application performance +- Consider adjusting PostgreSQL parameters if needed + +## Verification +Verify the updated resources by inspecting the cluster configuration or Pod details: +```bash +kbcli cluster describe pg-cluster -n demo +``` + +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +postgresql 1 / 1 1Gi / 1Gi data:20Gi standard +``` + +## Key Benefits of Vertical Scaling with KubeBlocks +- Seamless Scaling: Pods are recreated in a specific order to ensure minimal disruption. +- Dynamic Resource Adjustments: Easily scale CPU and memory based on workload requirements. +- Flexibility: Choose between OpsRequest for dynamic scaling or direct API updates for precise control. +- Improved Availability: The cluster remains operational during the scaling process, maintaining high availability. + +## Cleanup +To remove all created resources, delete the PostgreSQL cluster along with its namespace: +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +1. Deploy a PostgreSQL cluster managed by KubeBlocks. +2. Perform vertical scaling by increasing or decreasing resources for the postgresql component. +3. Use both OpsRequest and direct Cluster API updates to adjust resource allocations. 
+ +Vertical scaling is a powerful tool for optimizing resource utilization and adapting to changing workload demands, ensuring your PostgreSQL cluster remains performant and resilient. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/03-horizontal-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..4eec0094 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,293 @@ +--- +title: Horizontal Scaling of PostgreSQL Clusters with KubeBlocks +description: Learn how to perform horizontal scaling (scale-out and scale-in) on a PostgreSQL cluster managed by KubeBlocks using OpsRequest and direct Cluster API updates. +keywords: [KubeBlocks, PostgreSQL, Horizontal Scaling, Scale-Out, Scale-In, Kubernetes] +sidebar_position: 3 +sidebar_label: Horizontal Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Horizontal Scaling for PostgreSQL Clusters with KubeBlocks + +This guide explains how to perform horizontal scaling (scale-out and scale-in) on a PostgreSQL cluster managed by KubeBlocks. You'll learn how to use both **OpsRequest** and direct **Cluster API** updates to achieve this. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a PostgreSQL Cluster + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + + +## Scale-out (Add Replicas) + +**Expected Workflow**: + +1. New pod is provisioned, and transitions from `Pending` to `Running` with `secondary` role +2. Data synced from primary to new replica +3. 
Cluster status changes from `Updating` to `Running` + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale out the PostgreSQL cluster by adding 1 replica: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-scale-out-ops + namespace: demo + spec: + clusterName: pg-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: postgresql + # Specifies the replica changes for scaling in components + scaleOut: + # Specifies the replica changes for the component. + # add one more replica to current component + replicaChanges: 1 + ``` + + Monitor the progress of the scaling operation: + + ```bash + kubectl get ops pg-cluster-scale-out-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-scale-out HorizontalScaling pg-cluster Running 0/1 8s + pg-scale-out HorizontalScaling pg-cluster Running 1/1 24s + pg-scale-out HorizontalScaling pg-cluster Succeed 1/1 24s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + disableExporter: true + replicas: 3 # increase replicas to scale-out + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "1" + memory: "1Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster pg-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 3}]' + ``` + + + +### Verify Scale-Out + +After applying the operation, you will see a new 
pod created and the PostgreSQL cluster status goes from `Updating` to `Running`, and the newly created pod has a new role `secondary`. + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster +``` + +Example Output (3 Pods): +```bash +NAME READY STATUS RESTARTS AGE +pg-cluster-postgresql-0 4/4 Running 0 13m +pg-cluster-postgresql-1 4/4 Running 0 12m +pg-cluster-postgresql-2 4/4 Running 0 5m5s +``` + +New replicas automatically join as secondary nodes. +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster -L kubeblocks.io/role +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE ROLE +pg-cluster-postgresql-0 4/4 Running 0 13m primary +pg-cluster-postgresql-1 4/4 Running 0 12m secondary +pg-cluster-postgresql-2 4/4 Running 0 5m54s secondary +``` + +## Scale-in (Remove Replicas) + +**Expected Workflow**: + +1. Selected replica (the one with the largest ordinal) is removed +2. If removing a primary replica, automatic switchover occurs first +3. Pod is terminated gracefully +4. Cluster status changes from `Updating` to `Running` + +:::note +If the replica being scaled-in happens to be a primary replica, KubeBlocks will trigger a Switchover actions. And this pod will not be terminated until this Switchover action succeeds. +::: + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale in the PostgreSQL cluster by removing ONE replica: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-scale-in-ops + namespace: demo + spec: + clusterName: pg-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: postgresql + # Specifies the replica changes for scaling in components + scaleIn: + # Specifies the replica changes for the component. 
+ # remove one replica from current component + replicaChanges: 1 + ``` + + Monitor progress: + ```bash + kubectl get ops pg-cluster-scale-in-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-scale-in HorizontalScaling pg-cluster Running 0/1 8s + pg-scale-in HorizontalScaling pg-cluster Running 1/1 24s + pg-scale-in HorizontalScaling pg-cluster Succeed 1/1 24s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + disableExporter: true + replicas: 1 # decrease replicas to scale-in + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "1" + memory: "1Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster pg-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 1}]' + ``` + + + + +### Verify Scale-In + +Example Output (ONE Pod): +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster +NAME READY STATUS RESTARTS AGE +pg-cluster-postgresql-0 4/4 Running 0 16m +``` + +## Best Practices + +When performing horizontal scaling: +- Scale during low-traffic periods when possible +- Monitor cluster health during scaling operations +- Verify sufficient resources exist before scaling out +- Consider storage requirements for new replicas + +## Cleanup +To remove all created resources, delete the PostgreSQL cluster along with its namespace: +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo 
+```
+
+## Summary
+In this guide you learned how to:
+- Perform scale-out operations to add replicas to a PostgreSQL cluster.
+- Perform scale-in operations to remove replicas from a PostgreSQL cluster.
+- Use both OpsRequest and direct Cluster API updates for horizontal scaling.
+
+KubeBlocks ensures seamless scaling with minimal disruption to your database operations.
diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/04-volume-expansion.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/04-volume-expansion.mdx
new file mode 100644
index 00000000..0149f1c7
--- /dev/null
+++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/04-volume-expansion.mdx
@@ -0,0 +1,237 @@
+---
+title: Expanding Volume in a PostgreSQL Cluster
+description: Learn how to expand Persistent Volume Claims (PVCs) in a PostgreSQL cluster managed by KubeBlocks without downtime.
+keywords: [KubeBlocks, PostgreSQL, Volume Expansion, Kubernetes, PVC]
+sidebar_position: 4
+sidebar_label: Volume Expansion
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+# Expanding Volume in a PostgreSQL Cluster
+
+This guide explains how to expand Persistent Volume Claims (PVCs) in a PostgreSQL cluster managed by **KubeBlocks**. Volume expansion enables dynamic storage capacity increases, allowing your database to scale seamlessly as data grows. When supported by the underlying storage class, this operation can be performed without downtime.
+
+Volume expansion allows you to increase the size of a Persistent Volume Claim (PVC) after it has been created. This feature was introduced in Kubernetes v1.11 and became generally available (GA) in Kubernetes v1.24.
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +### Check the Storage Class for Volume Expansion Support + +List all available storage classes and verify if volume expansion is supported by checking the `ALLOWVOLUMEEXPANSION` field: +```bash +kubectl get storageclass +``` + +Example Output: +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +Ensure the storage class you are using has `ALLOWVOLUMEEXPANSION` set to true. If it is false, the storage class does not support volume expansion. + +## Deploy a PostgreSQL Replication Cluster with StorageClass + +KubeBlocks uses a declarative approach to manage PostgreSQL clusters. Below is an example configuration for deploying a PostgreSQL cluster with 2 replicas (1 primary, 1 secondary). + +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + # specify storage class name supports Volume Expansion + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**Explanation of Key Fields** +- `storageClassName`: Specifies `StorageClass` name that supports volume expansion. If not set, the StorageClass annotated `default` will be used. + +:::note +**ALLOWVOLUMEEXPANSION** + +Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`) when creating cluster. 
+ +::: + + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## Expand volume + +:::note +1. Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`). +2. The new size must be larger than the current size. +3. Volume expansion may require additional configurations depending on the storage provider. +::: + +You can expand the volume in one of two ways: + + + + + Option 1: Using VolumeExpansion OpsRequest + + Apply the following YAML to increase the volume size for the postgresql component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-expand-volume-ops + namespace: demo + spec: + clusterName: pg-cluster + type: VolumeExpansion + volumeExpansion: + - componentName: postgresql + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + Monitor the expansion progress with: + + ```bash + kubectl describe ops pg-cluster-expand-volume-ops -n demo + ``` + + Expected Result: + ```bash + Status: + Phase: Succeed + ``` + Once completed, the PVC size will be updated. + + :::note + If the storage class you use does not support volume expansion, this OpsRequest fails fast with information like: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update the `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` field to the desired size. 
+ + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # specify new size, and make sure it is larger than current size + storage: 30Gi + ``` + KubeBlocks will automatically update the PVC size based on the new specifications. + + + +## Verification + +Verify the updated cluster configuration: +```bash +kbcli cluster describe pg-cluster -n demo +``` +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +postgresql 500m / 500m 512Mi / 512Mi data:30Gi +``` +The volume size for the data PVC has been updated to the specified value (e.g., 30Gi in this case). + +Confirm PVC resizing completion: +```bash +kubectl get pvc -l app.kubernetes.io/instance=pg-cluster -n demo +``` +Expected Output: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +pg-cluster-postgresql-data-0 Bound pvc-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx 30Gi RWO 33m +pg-cluster-postgresql-data-1 Bound pvc-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx 30Gi RWO 33m +``` + +## Cleanup +To remove all created resources, delete the PostgreSQL cluster along with its namespace: +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +``` + +## Summary + +In this guide you learned how to: +1. Verify storage class compatibility for volume expansion. +2. Perform volume expansion using either: + - OpsRequest for dynamic updates. + - Cluster API for manual updates. +3. Verify the updated PVC size and ensure the resize operation is complete. 
+ +With volume expansion, you can efficiently scale your PostgreSQL cluster's storage capacity without service interruptions, ensuring your database can grow alongside your application needs. + + diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/05-manage-loadbalancer.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..7cc3da1b --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,386 @@ +--- +title: Create and Destroy PostgreSQL Service Using the Declarative Cluster API in KubeBlocks +description: Learn how to configure and manage PostgreSQL services in KubeBlocks for external and internal access using LoadBalancer and other service types. +keywords: [KubeBlocks, PostgreSQL, LoadBalancer, External Service, Expose, Kubernetes] +sidebar_position: 5 +sidebar_label: Manage PostgreSQL Services +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Manage PostgreSQL Services Using the Declarative Cluster API in KubeBlocks + +This guide provides step-by-step instructions for exposing PostgreSQL services managed by KubeBlocks, both externally and internally. You'll learn to configure external access using cloud provider LoadBalancer services, manage internal services, and properly disable external exposure when no longer needed. 
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a PostgreSQL Cluster + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## View Network Services +List the Services created for the PostgreSQL cluster: +```bash +kubectl get service -l app.kubernetes.io/instance=pg-cluster -n demo +``` + +Example Services: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +pg-cluster-postgresql-postgresql ClusterIP 10.96.19.237 5432/TCP,6432/TCP 157m +``` + +:::note + +There are two ports here 5432 and 6432, where 5432 is for postgresql and 6432 for PgBouncer. + +::: + +## Expose PostgreSQL Service + +External service addresses enable public internet access to PostgreSQL, while internal service addresses restrict access to the user's VPC. + +### Service Types Comparison + +| Type | Use Case | Cloud Cost | Security | +|------|----------|------------|----------| +| ClusterIP | Internal service communication | Free | Highest | +| NodePort | Development/testing | Low | Moderate | +| LoadBalancer | Production external access | High | Managed via security groups | + + + + + + + Option 1: Using OpsRequest + + To expose the PostgreSQL service externally using a LoadBalancer, create an OpsRequest resource: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: pg-cluster + expose: + - componentName: postgresql + services: + - name: internet + # Determines how the Service is exposed. Defaults to 'ClusterIP'. + # Valid options are 'ClusterIP', 'NodePort', and 'LoadBalancer'. + serviceType: LoadBalancer + # Contains cloud provider related parameters if ServiceType is LoadBalancer. 
+ # Following is an example for AWS EKS + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + # Specifies a role to target with the service. + # If specified, the service will only be exposed to pods with the matching + # role. + roleSelector: primary + switch: Enable + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops pg-cluster-expose-enable-ops -n demo + ``` + + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-cluster-expose-enable-ops Expose pg-cluster Succeed 1/1 31s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the `spec.services` section in the Cluster resource to include a LoadBalancer service: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + # expose a external service + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + componentSelector: postgresql + name: postgresql-internet + serviceName: postgresql-internet + roleSelector: primary + spec: + ipFamilyPolicy: PreferDualStack + ports: + - name: tcp-postgresql + port: 5432 + protocol: TCP + targetPort: tcp-postgresql + type: LoadBalancer + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + The YAML configuration above adds a new external service under the services section. 
This LoadBalancer service includes annotations for AWS Network Load Balancer (NLB). + + :::note + Cloud Provider Annotations + + When using a LoadBalancer service, you must include the appropriate annotations specific to your cloud provider. Below is a list of commonly used annotations for different cloud providers: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # Restricts the LoadBalancer to internal VPC access only. Defaults to internet-facing if not specified. + cloud.google.com/l4-rbs: "enabled" # Optimization for internet-facing LoadBalancer + ``` + + - Alibaba Cloud + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # Use "intranet" for internal-facing LoadBalancer + ``` + ::: + + + :::note + The `service.beta.kubernetes.io/aws-load-balancer-internal` annotation controls whether the LoadBalancer is internal or internet-facing. Note that this annotation cannot be modified dynamically after service creation. + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # Use "true" for internal VPC IPs + ``` + If you change this annotation from "false" to "true" after the Service is created, the annotation may update in the Service object, but the LoadBalancer will still retain its public IP. + + To properly modify this behavior: + - First, delete the existing LoadBalancer service. + - Recreate the service with the updated annotation (`service.beta.kubernetes.io/aws-load-balancer-internal`: "true"). + - Wait for the new LoadBalancer to be provisioned with the correct internal or external IP. 
+ ::: + + + Wait for the Cluster status to transition to Running using the following command: + ```bash + kubectl get cluster pg-cluster -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + pg-cluster postgresql Delete Running 18m + ``` + + + + +### Verify the Exposed Service +Check the service details to confirm the LoadBalancer service is created: + +```bash +kubectl get service -l app.kubernetes.io/instance=pg-cluster -n demo +``` + +Example Output: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +pg-cluster-postgresql-postgresql ClusterIP 10.96.19.237 5432/TCP,6432/TCP 33m +pg-cluster-postgresql-internet LoadBalancer 172.20.60.24 5432:31243/TCP 1m +``` + +### Wait for DNS Propagation + +The LoadBalancer DNS name may take 2-5 minutes to become resolvable. Verify the resolution status: + +```bash +nslookup # replace with the real IP from previous output. +``` + +## Connect to PostgreSQL Externally + +### Retrieve Credentials + +KubeBlocks automatically creates a Secret containing the PostgreSQL postgres credentials. 
Retrieve the PostgreSQL postgres credentials: +```bash +NAME=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 -d` +``` + +### Connect Using PostgreSQL Client + +You can now connect to the PostgreSQL database externally (e.g., from your laptop or EC2): +```bash +psql -h -U${NAME} -W +``` + +## Disable External Exposure + + + + + + Option 1: Using OpsRequest + + To disable external access, create an OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-expose-disable-ops + namespace: demo + spec: + clusterName: pg-cluster + expose: + - componentName: postgresql + services: + - name: internet + roleSelector: primary + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops pg-cluster-expose-disable-ops -n demo + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-cluster-expose-disable-ops Expose pg-cluster Succeed 1/1 12s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, remove the `spec.services` field from the Cluster resource: + ```bash + kubectl patch cluster pg-cluster -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + Monitor the cluster status until it is Running: + ```bash + kubectl get cluster pg-cluster -n demo -w + ``` + + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + pg-cluster postgresql Delete Running 23m + ``` + + + +### Verify Service Removal + +Ensure that the 'pg-cluster-postgresql-internet' Service is removed: + +```bash +kubectl get service -n demo +``` + +Expected Result: The 'pg-cluster-postgresql-internet' Service should be removed. 
+
+## Expose PgBouncer Service
+
+PostgreSQL uses a multi-process architecture that creates a separate backend process for each connection. Excessive connections consume significant memory, reducing database throughput and stability. KubeBlocks addresses this with PgBouncer, a connection pooler for PostgreSQL clusters.
+
+
+To expose the PgBouncer service, update the ports information with that of PgBouncer as follows:
+
+```yaml
+  apiVersion: apps.kubeblocks.io/v1
+  kind: Cluster
+  spec:
+    services:
+    - annotations:
+        service.beta.kubernetes.io/aws-load-balancer-type: nlb
+        service.beta.kubernetes.io/aws-load-balancer-internal: "false"
+      componentSelector: postgresql
+      name: postgresql-internet
+      serviceName: postgresql-internet
+      roleSelector: primary
+      spec:
+        ipFamilyPolicy: PreferDualStack
+        ports:
+        - name: tcp-pgbouncer
+          port: 6432
+          protocol: TCP
+          targetPort: tcp-pgbouncer
+        type: LoadBalancer
+    componentSpecs:
+    - name: postgresql
+...
+```
+
+## Cleanup
+To remove all created resources, delete the PostgreSQL cluster along with its namespace:
+```bash
+kubectl delete cluster pg-cluster -n demo
+kubectl delete ns demo
+```
+
+## Summary
+This guide demonstrated how to:
+- Expose a PostgreSQL service externally or internally using KubeBlocks.
+- Configure LoadBalancer services with cloud provider-specific annotations.
+- Manage external access by enabling or disabling services via OpsRequest or direct updates to the Cluster API.
+
+KubeBlocks provides flexibility and simplicity for managing PostgreSQL services in Kubernetes environments.
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/06-minior-version-upgrade.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/06-minior-version-upgrade.mdx new file mode 100644 index 00000000..aaee8918 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/06-minior-version-upgrade.mdx @@ -0,0 +1,293 @@ +--- +title: Upgrading the Minor Version of a PostgreSQL Cluster in KubeBlocks +description: Learn how to deploy and upgrade a PostgreSQL cluster managed by KubeBlocks with minimal downtime. +keywords: [KubeBlocks, PostgreSQL, Upgrade, Rolling Upgrade, Kubernetes] +sidebar_position: 6 +sidebar_label: Minor Version Upgrade +hidden: true +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Upgrading the Minor Version of a PostgreSQL Cluster in KubeBlocks + +This guide walks you through the deployment and minor version upgrade of a PostgreSQL cluster managed by KubeBlocks, ensuring minimal downtime during the process. + +To minimize the impact on database availability, the upgrade process starts with the replicas (secondary instances). Once the replicas are upgraded, a switchover operation promotes one of the upgraded replicas to primary. The switchover process is very fast, typically completing in a few hundred milliseconds. After the switchover, the original primary instance is upgraded, ensuring minimal disruption to the application. + + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. 
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Deploy a PostgreSQL Replication Cluster + +KubeBlocks uses a declarative approach for managing PostgreSQL clusters. Below is an example configuration for deploying a PostgreSQL cluster with 2 replicas (1 primary, 1 replicas). + +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 14.7.2 # use 14.7.2 here to test minor version upgrade + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +## Verifying the Deployment +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster pg-cluster -n demo -w +``` + +Example Output: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +pg-cluster postgresql Delete Creating 50s +pg-cluster postgresql Delete Running 4m2s +``` +Once the cluster status becomes Running, your PostgreSQL cluster is ready for use. + +:::tip +If you are creating the cluster for the very first time, it may take some time to pull images before running. + +::: + +## List All Available PostgreSQL Versions + +Use the following command to display the PostgreSQL versions supported by your KubeBlocks installation: +```bash +kubectl get cmpv postgresql +``` +Expected Output: +```bash +NAME VERSIONS STATUS AGE +postgresql 16.4.0,15.7.0,14.8.0,14.7.2,12.15.0,12.14.1,12.14.0 Available 33d +``` + +Note: The list of supported versions may vary depending on your KubeBlocks version. 
+ +## Upgrading the PostgreSQL Version + +### Identify the Current Primary and Secondary Instances + +Run the following command to identify the roles of the cluster instances: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster -L kubeblocks.io/role +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE ROLE +pg-cluster-postgresql-0 4/4 Running 0 66m primary +pg-cluster-postgresql-1 4/4 Running 0 65m secondary +``` + +### Check compatible versions for the same ComponentDefinition + +**Step 1.** Get the list of `ComponentDefinition` associated with a given `ComponentVersion` + +```bash +kubectl get cmpv postgresql -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n' +``` + +
+Example Output +```text +postgresql-12-1.0.0 +postgresql-14-1.0.0 +postgresql-15-1.0.0 +postgresql-16-1.0.0 +``` +
+
+**Step 2.** Get the list of releases (service versions) compatible with a given `ComponentDefinition`
+
+```bash
+kubectl get cmpv postgresql -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("postgresql-14"))) | .releases[]'
+```
+
+This returns versions compatible with `ComponentDefinition` named `postgresql-14`:
+
+<details>
+Example Output +```text +14.7.2 +14.8.0 +``` +
+ +### Apply the Upgrade + +**Expected Workflow**: + +1. Secondary replicas are upgrade first (one at a time) +1. Primary is upgrade last after secondary replicas are healthy +1. Cluster status transitions from `Updating` to `Running` + +To upgrade the PostgreSQL version, modify the serviceVersion field in the Cluster resource. In this example, we will upgrade the PostgreSQL version from `14.7.2` to `14.8.0` + + + + + + Option 1: Using OpsRequest + + You can upgrade the cluster using an OpsRequest: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-upgrade + namespace: demo + spec: + # Specifies the name of the Cluster resource that this operation is targeting. + clusterName: pg-cluster + type: Upgrade + upgrade: + components: + - componentName: postgresql + # Specifies the desired service version of component + serviceVersion: "14.8.0" + ``` + + + + Option 1: Using the Declarative Cluster API + + Alternatively, you may stop the cluster by setting the `spec.componentSpecs.serviceVersion` field in the cluster configuration: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 14.8.0 # set to 14.8.0 for upgrading + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + + + +### Monitor the Upgrade Process +During the upgrade, observe the changes in the cluster's Pods: +```bash +kubectl get pods -n demo -w +``` +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +pg-cluster-postgresql-0 4/4 Running 0 97s +pg-cluster-postgresql-1 4/4 Running 0 50s +pg-cluster-postgresql-1 3/4 Running 2 (2s ago) 68s 
+pg-cluster-postgresql-0   4/4     Running   2 (6s ago)   2m6s
+```
+**Key Observations:**
+- The secondary replica ('pg-cluster-postgresql-1') is upgraded first.
+- A switchover operation occurs, making the replica the new primary.
+- Finally, the original primary ('pg-cluster-postgresql-0') is upgraded.
+
+After the upgrade is completed, roles are switched:
+```bash
+kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster -L kubeblocks.io/role
+```
+Updated Roles:
+```bash
+NAME                      READY   STATUS    RESTARTS   AGE   ROLE
+pg-cluster-postgresql-0   4/4     Running   0          2m    secondary
+pg-cluster-postgresql-1   4/4     Running   0          2m    primary
+```
+
+## Verification
+
+### Check Cluster Status
+Ensure the cluster is in the Running state:
+```bash
+kubectl get cluster pg-cluster -n demo -w
+```
+Expected Output:
+```bash
+NAME         CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS    AGE
+pg-cluster   postgresql           Delete               Running   17m
+```
+
+### Verify the PostgreSQL Version
+Retrieve the PostgreSQL postgres credentials:
+```bash
+NAME=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 -d`
+PASSWD=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 -d`
+```
+
+Connect to the upgraded instances and verify the PostgreSQL version:
+```bash
+kubectl exec -ti -n demo pg-cluster-postgresql-1 -- \
+  env PGUSER=${NAME} PGPASSWORD=${PASSWD} psql -c "SELECT VERSION();"
+```
+
+## Summary
+In this guide, you learned how to:
+- Deploy a PostgreSQL replication cluster using KubeBlocks.
+- Perform a rolling upgrade of the PostgreSQL minor version with minimal downtime.
+- Verify that the upgrade was successful.
+
+This rolling upgrade strategy ensures high availability by upgrading the replicas first, performing a switchover, and then upgrading the original primary instance.
diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/07-modify-parameters.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/07-modify-parameters.mdx new file mode 100644 index 00000000..0fabbd66 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/07-modify-parameters.mdx @@ -0,0 +1,258 @@ +--- +title: Modify PostgreSQL Parameters +description: Learn how to modify dynamic and static PostgreSQL parameters in KubeBlocks using Reconfiguring OpsRequest to optimize database performance and availability. +keywords: [PostgreSQL, KubeBlocks, OpsRequest, dynamic parameters, static parameters, database configuration] +sidebar_position: 7 +sidebar_label: Modify PostgreSQL Parameters +--- + +# Modify PostgreSQL Parameters + +Database reconfiguration involves modifying parameters, settings, or configurations to optimize performance, security, or availability. Parameter changes fall into two categories: + +| Type | Restart Required | Scope | Example Parameters | +|------|------------------|-------|--------------------| +| **Dynamic** | No | Immediate effect | `max_connections` | +| **Static** | Yes | After restart | `shared_buffers` | + +For static parameters, KubeBlocks minimizes downtime by: +1. Modifying and restarting replica nodes first +2. Performing a switchover to promote the updated replica as primary (typically completes in milliseconds) +3. Restarting the original primary node + +This guide demonstrates how to modify both dynamic and static parameters of a PostgreSQL cluster managed by KubeBlocks using a Reconfiguring OpsRequest. 
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a PostgreSQL Cluster + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## Check Parameter Values + +### Retrieve Credentials +KubeBlocks automatically creates a secret containing the PostgreSQL postgres credentials. Retrieve the credentials with the following commands: +```bash +NAME=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 -d` +``` + +### Access PostgreSQL Cluster +To connect to the cluster's primary node, use the PostgreSQL client: +```bash +kubectl exec -it -n demo pg-cluster-postgresql-0 -c postgresql -- env PGUSER=${NAME} PGPASSWORD=${PASSWD} psql +``` + +### Query Parameter Values + +Once connected, you can query the current value of 'max_connections' and 'shared_buffers': +```sql +postgres=# SHOW max_connections; + max_connections +----------------- + 56 +(1 row) + +postgres=# show pgaudit.log; + pgaudit.log +------------- + ddl,read,write +(1 row) + +postgres=# show shared_buffers; + shared_buffers +---------------- + 128MB +(1 row) +``` + +## Dynamic Parameter Example: Modifying max_connections and pgaudit.log + +Dynamic parameters like `max_connections` can be modified without restarting PostgreSQL. 
Changes take effect immediately, allowing you to: +- Adjust connection limits on-the-fly +- Modify audit logging levels +- Tune performance parameters + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-reconfigure-dynamic + namespace: demo +spec: + clusterName: pg-cluster + reconfigures: + - componentName: postgresql + parameters: + - key: max_connections + value: '100' + - key: pgaudit.log + value: ddl + type: Reconfiguring +``` + +This configuration: +- Changes `pgaudit.log` from default `ddl,read,write` to `ddl` only +- Increases `max_connections` from 56 to 100 + +The `pgaudit.log` parameter controls audit logging granularity. Available options: + +| Value | Description | +|----------|-------------| +| none | No additional logging is performed by pgAudit. | +| ddl | Logs all Data Definition Language (DDL) statements| +| dml | Logs all Data Manipulation Language (DML) statements +| role | Logs all role-related commands | +| read | Logs all read operations| +| write | Logs all write operations| +| function | Logs all function calls| +| misc | Logs miscellaneous commands| +| all | Logs everything| + + +Wait for the OpsRequest to complete: +```bash +kubectl get ops pg-reconfigure-dynamic -n demo -w +``` + +Example Output: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +pg-reconfigure-dynamic Reconfiguring pg-cluster Running -/- 11s +pg-reconfigure-dynamic Reconfiguring pg-cluster Succeed -/- 31s +``` + +**Verifying the Configuration Change** + +Log into the PostgreSQL instance and confirm that the `max_connections` and `pgaudit.log` parameters have been updated: + +```sql +postgres=# show max_connections; + max_connections +----------------- + 100 +(1 row) + +postgres=# show pgaudit.log; + pgaudit.log +------------- + ddl +(1 row) +``` + +The output verifies both parameters were updated: +- `max_connections` increased to 100 +- `pgaudit.log` reduced to DDL-only logging + +## Static Parameter Example: Modifying 
shared_buffers + +Static parameters like `shared_buffers` require a restart. This example increases the buffer from 128MB to 256MB. + +Create a Reconfigure OpsRequest. Apply the following OpsRequest YAML to update the 'shared_buffers': + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: postgresql-reconfigure-static + namespace: demo +spec: + clusterName: pg-cluster + force: false + reconfigures: + - componentName: postgresql + parameters: + - key: shared_buffers + value: '256MB' + preConditionDeadlineSeconds: 0 + type: Reconfiguring +``` + +Check the status of the OpsRequest until it completes: + +```bash +kubectl get ops postgresql-reconfigure-static -n demo -w +``` +Example Output: +```bash +postgresql-reconfigure-static Reconfiguring pg-cluster Running -/- 5s +postgresql-reconfigure-static Reconfiguring pg-cluster Succeed -/- 31s +``` + +**Verify the Configuration Change** + +Log into the PostgreSQL instance and confirm that the `shared_buffers` parameter has been updated: + +```sql +postgres=# show shared_buffers; + shared_buffers +---------------- + 256MB +(1 row) +``` + +## Validity Checking on Reconfiguration + +KubeBlocks validates parameters before applying changes. For example, `max_connections` follow rules: + +```cue +max_connections?: int & >=6 & <=8388607 +``` +It means `max_connections` must be an integer ranging from 6 to 8388607. + +And if you somehow set a string to this value like: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: postgresql-reconfigure-invalid + namespace: demo +spec: + type: Reconfiguring + clusterName: pg-cluster + reconfigures: + - componentName: postgresql + parameters: + - key: max_connections + value: 'abc' +``` + +By checking the status of the OpsRequest +```bash +kubectl get ops postgresql-reconfigure-invalid -n demo +``` + +This OpsRequest fails fast. 
To checkout the details, you may describe the `Parameter` CR: + +```bash +kubectl describe parameter postgresql-reconfigure-invalid -n demo +``` + +And you will find message `failed to validate updated config: [failed to parse field max_connections: [strconv.Atoi: parsing "STRING": invalid syntax]]` + +## Cleanup +To remove all created resources, delete the PostgreSQL cluster along with its namespace: +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +``` + +## Summary +This guide covered modifying PostgreSQL parameters through KubeBlocks: +- Dynamic changes (e.g., `max_connections`) apply immediately +- Static changes (e.g., `shared_buffers`) require restart but with minimal downtime +- All changes are validated before application +- Configuration follows declarative management principles diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/08-switchover.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/08-switchover.mdx new file mode 100644 index 00000000..15aa5e03 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/08-switchover.mdx @@ -0,0 +1,179 @@ +--- +title: PostgreSQL Cluster Switchover +description: Perform planned role transitions in PostgreSQL clusters with KubeBlocks for minimal downtime and controlled maintenance +keywords: [PostgreSQL, KubeBlocks, Switchover, High Availability, Role Transition, Kubernetes] +sidebar_position: 8 +sidebar_label: PostgreSQL Switchover +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# PostgreSQL Cluster Switchover + +A **switchover** is a planned operation that transfers the primary role from one PostgreSQL instance to another. 
Unlike failover which occurs during failures, switchover provides: +- Controlled role transitions +- Minimal downtime (typically a few hundred milliseconds) +- Predictable maintenance windows + +Switchover is ideal for: +- Node maintenance/upgrades +- Workload rebalancing +- Testing high availability +- Planned infrastructure changes + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a PostgreSQL Cluster + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## Check Roles +List the Pods and their roles (primary or secondary): + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster -L kubeblocks.io/role +``` + +Example Output: + +```text +NAME READY STATUS RESTARTS AGE ROLE +pg-cluster-postgresql-0 4/4 Running 0 9m59s primary +pg-cluster-postgresql-1 4/4 Running 0 11m secondary +``` + +## Performing a Planned Switchover + +To initiate a planned switchover, create an OpsRequest resource as shown below: + + + + Option 1: Automatic Switchover (No preferred candidate) + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-switchover-ops + namespace: demo + spec: + clusterName: pg-cluster + type: Switchover + switchover: + - componentName: postgresql + instanceName: pg-cluster-postgresql-0 + ``` + **Key Parameters:** + - `instanceName`: Specifies the instance (Pod) that is primary or leader before a switchover operation. + + + + Option 2: Targeted Switchover (Specific candidate) + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-switchover-targeted + namespace: demo + spec: + clusterName: pg-cluster + type: Switchover + switchover: + - componentName: postgresql + # Specifies the instance whose role will be transferred. 
+ # A typical usage is to transfer the leader role in a consensus system. + instanceName: pg-cluster-postgresql-0 + # If CandidateName is specified, the role will be transferred to this instance. + # The name must match one of the pods in the component. + # Refer to ComponentDefinition's Swtichover lifecycle action for more details. + candidateName: pg-cluster-postgresql-1 + ``` + + **Key Parameters:** + - `instanceName`: Specifies the instance (Pod) that is primary or leader before a switchover operation. + - `candidateName`: If candidate name is specified, the role will be transferred to this instance. + + + +## Monitoring the Switchover + +Monitor the switchover progress: + +```bash +kubectl get ops pg-switchover-ops -n demo -w +``` + +Expected Result: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +pg-switchover-ops Switchover pg-cluster Succeed 1/1 17s +``` +## Verify the Switchover + +After the switchover is executed, the specified instance will be promoted to the primary role, while the previously primary instance will take on the secondary role. + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster -L kubeblocks.io/role +``` + +Expected Output: + +```text +NAME READY STATUS RESTARTS AGE ROLE +pg-cluster-postgresql-0 4/4 Running 0 19m59s secondary +pg-cluster-postgresql-1 4/4 Running 0 21m primary +``` + +In this example: +- Pod 'pg-cluster-postgresql-1' has been promoted to the primary role. +- Pod 'pg-cluster-postgresql-0' has transitioned to the secondary role. 
+ +## Troubleshooting + +### Common Switchover Issues + +If the switchover operation gets stuck, check these resources: +```bash +# Check agent logs on both current primary and candidate +kubectl logs -n demo -c kbagent +kubectl logs -n demo -c kbagent + +# Check cluster events for errors +kubectl get events -n demo --field-selector involvedObject.name=pg-cluster + +# Check kubeblocks logs +kubectl -n kb-system logs deploy/kubeblocks +``` + +## Summary + +This guide demonstrated how to: +1. Deploy a PostgreSQL HA cluster +2. Perform both automatic and targeted Switchover +3. Verify role transitions + +**Key takeaways:** +- Switchover enables controlled maintenance with minimal downtime (~100-500ms) +- KubeBlocks provides declarative operations for reliable role transitions +- Always verify: + - Cluster status immediately after switchover + - Application connectivity + - Replication health +- Check logs for troubleshooting: + - KubeBlocks operator (kb-system namespace) + - kbagent on database pods + diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/09-decommission-a-specific-replica.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..3d52b164 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,154 @@ +--- +title: Decommission a Specific Pod in KubeBlocks-Managed PostgreSQL Clusters +description: Learn how to decommission (take offline) a specific Pod in a PostgreSQL cluster managed by KubeBlocks. 
+keywords: [KubeBlocks, PostgreSQL, Decommission Pod, Horizontal Scaling, Kubernetes] +sidebar_position: 9 +sidebar_label: Decommission PostgreSQL Replica +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + +# Decommission a Specific Pod in KubeBlocks-Managed PostgreSQL Clusters + +This guide explains how to decommission (take offline) specific Pods in PostgreSQL clusters managed by KubeBlocks. Decommissioning provides precise control over cluster resources while maintaining availability. Use this for workload rebalancing, node maintenance, or addressing failures. + +## Why Decommission Pods with KubeBlocks? + +In traditional StatefulSet-based deployments, Kubernetes lacks the ability to decommission specific Pods. StatefulSets ensure the order and identity of Pods, and scaling down always removes the Pod with the highest ordinal number (e.g., scaling down from 3 replicas removes `Pod-2` first). This limitation prevents precise control over which Pod to take offline, which can complicate maintenance, workload distribution, or failure handling. + +KubeBlocks overcomes this limitation by enabling administrators to decommission specific Pods directly. This fine-grained control ensures high availability and allows better resource management without disrupting the entire cluster. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a PostgreSQL Cluster + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## Decommission a Pod + +**Expected Workflow**: + +1. Replica specified in `onlineInstancesToOffline` is removed +2. Pod terminates gracefully +3. 
Cluster transitions from `Updating` to `Running` + +To decommission a specific Pod (e.g., 'pg-cluster-postgresql-1'), you can use one of the following methods: + + + + + + Option 1: Using OpsRequest + + Create an OpsRequest to mark the Pod as offline: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-decommission-ops + namespace: demo + spec: + clusterName: pg-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: postgresql + scaleIn: + onlineInstancesToOffline: + - 'pg-cluster-postgresql-1' # Specifies the instance names that need to be taken offline + ``` + + #### Monitor the Decommissioning Process + Check the progress of the decommissioning operation: + + ```bash + kubectl get ops pg-cluster-decommission-ops -n demo -w + ``` + Example Output: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-cluster-decommission-ops HorizontalScaling pg-cluster Succeed 1/1 33s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the Cluster resource directly to decommission the Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + disableExporter: true + replicas: 1 + offlineInstances: + - pg-cluster-postgresql-1 # <----- Specify Pod to be decommissioned + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + + + + +### Verify the Decommissioning + +After applying the updated configuration, verify the remaining Pods in the cluster: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE 
+pg-cluster-postgresql-0 4/4 Running 0 6m12s +``` + +## Summary +Key takeaways: +- Traditional StatefulSets lack precise Pod removal control +- KubeBlocks enables targeted Pod decommissioning +- Two implementation methods: OpsRequest or Cluster API + +This provides granular cluster management while maintaining availability. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/11-rebuild-replica.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/11-rebuild-replica.mdx new file mode 100644 index 00000000..938ccf7d --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/11-rebuild-replica.mdx @@ -0,0 +1,320 @@ +--- +title: Recovering PostgreSQL Replica in KubeBlocks +description: How to rebuild a PostgreSQL replica in a replication cluster managed by KubeBlocks using in-place and non-in-place methods. +keywords: [KubeBlocks, PostgreSQL, Replica Rebuild, In-Place, Non-In-Place] +sidebar_position: 11 +sidebar_label: Recovering PostgreSQL Replica +--- + +# Rebuilding PostgreSQL Replicas in KubeBlocks + +This guide demonstrates how to rebuild replicas using both in-place and non-in-place methods. + +**What is Replica Rebuilding**? + +Replica rebuilding is the process of recreating a PostgreSQL replica from scratch or from a backup while maintaining: +- **Data Consistency**: Ensures the replica has an exact copy of primary data +- **High Availability**: Minimizes downtime during the rebuild process + +During this process: +1. The problematic replica is identified and isolated +2. A new base backup is taken from the primary +3. WAL (Write-Ahead Log) segments are streamed to catch up +4. The replica rejoins the replication cluster + +**When to Rebuild a PostgreSQL Instance**? 
+ +Rebuilding becomes necessary in these common scenarios: +- Replica falls too far behind primary (irrecoverable lag), or Replication slot corruption +- WAL file gaps that can't be automatically resolved +- Data Corruption: with storage-level corruption (disk/volume issues), inconsistent data between primary and replica, etc +- Infrastructure Issues: Node failure, storage device failure or cross Zone/Region migration + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a PostgreSQL Cluster + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## Connect to the Primary PostgreSQL Replcia and Write Mock Data + +Check replica roles with command: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster -L kubeblocks.io/role +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE ROLE +pg-cluster-postgresql-0 4/4 Running 0 13m secondary +pg-cluster-postgresql-1 4/4 Running 0 12m primary +``` + +### Step 1: Connect to the Primary Instance + +KubeBlocks automatically creates a Secret containing the PostgreSQL postgres credentials. Retrieve the PostgreSQL postgres credentials: + +```bash +NAME=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 -d` +``` + +Connect to the primary replica through service `pg-cluster-postgresql-postgresql` which routes data to primary replica. 
+
+```bash
+kubectl exec -ti -n demo pg-cluster-postgresql-0 -- env PGUSER=${NAME} PGPASSWORD=${PASSWD} psql -h pg-cluster-postgresql-postgresql
+```
+
+### Step 2: Write Data to the Primary Instance
+Connect to the primary instance and write a record to the database:
+
+```sql
+postgrel> CREATE DATABASE test;
+postgrel> \c test;
+postgrel> CREATE TABLE t1 (id INT PRIMARY KEY, name VARCHAR(255));
+postgrel> INSERT INTO t1 VALUES (1, 'John Doe');
+```
+
+### Step 3: Verify Data Replication
+
+Connect to the replica instance (e.g. pg-cluster-postgresql-0) to verify that the data has been replicated:
+```bash
+kubectl exec -ti -n demo pg-cluster-postgresql-0 -- env PGUSER=${NAME} PGPASSWORD=${PASSWD} psql -h 127.0.0.1
+```
+:::note
+If the primary instance is 'pg-cluster-postgresql-0', you should connect to 'pg-cluster-postgresql-1' instead. Make sure to check the role of each instance before connecting.
+:::
+
+```sql
+postgrel> \c test;
+postgrel> SELECT * FROM t1;
+```
+
+Example Output:
+```bash
+ id |   name
+----+----------
+  1 | John Doe
+(1 row)
+```
+
+## Rebuild the Replica
+
+KubeBlocks provides two approaches for rebuilding replicas: in-place and non-in-place.
+
+### In-Place Rebuild
+
+**Workflow**:
+1. Original Pod (e.g. 'pg-cluster-postgresql-0') is terminated
+2. New Pod is created with same name, New PVC is provisioned.
+3. Data is synchronized from primary
+
+Rebuild the replica in-place using the following configuration:
+```yaml
+apiVersion: operations.kubeblocks.io/v1alpha1
+kind: OpsRequest
+metadata:
+  name: pg-rebuild-replica-inplace
+  namespace: demo
+spec:
+  clusterName: pg-cluster
+  force: true
+  preConditionDeadlineSeconds: 0
+  rebuildFrom:
+  - componentName: postgresql
+    inPlace: true # set inPlace to true
+    instances:
+    - name: pg-cluster-postgresql-0
+  type: RebuildInstance
+```
+
+In this configuration, "pg-cluster-postgresql-0" refers to the instance name (Pod name) that will be repaired. 
+ +Monitor the rebuild operation: +```bash +kubectl get ops pg-rebuild-replica-inplace -n demo -w +``` +Example Output: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +pg-rebuild-replica-inplace RebuildInstance pg-cluster Running 0/1 5s +pg-rebuild-replica-inplace RebuildInstance pg-cluster Running 0/1 5s +pg-rebuild-replica-inplace RebuildInstance pg-cluster Running 0/1 46s +pg-rebuild-replica-inplace RebuildInstance pg-cluster Running 1/1 46s +pg-rebuild-replica-inplace RebuildInstance pg-cluster Succeed 1/1 47s +``` + +Verify the Pods to confirm the replica ("pg-cluster-postgresql-0") , its PVC and PV have been recreated. +```bash +kubectl get po,pvc,pv -l app.kubernetes.io/instance=pg-cluster -ndemo +``` +Example Output: +```bash +kubectl get po,pvc,pv -l app.kubernetes.io/instance=pg-cluster -ndemo +NAME READY STATUS RESTARTS AGE +pod/pg-cluster-postgresql-0 4/4 Running 0 5m6s +pod/pg-cluster-postgresql-1 4/4 Running 0 14m + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE +persistentvolumeclaim/data-pg-cluster-postgresql-0 Bound pvc-xxx 20Gi RWO 5m6s +persistentvolumeclaim/data-pg-cluster-postgresql-1 Bound pvc-yyy 20Gi RWO 14m +``` + +Connect to the replica and check if the data has been restored: + +```bash +kubectl exec -ti -n demo pg-cluster-postgresql-0 -- env PGUSER=${NAME} PGPASSWORD=${PASSWD} psql -h 127.0.0.1 +``` + +```sql +postgrel> \c test; +postgrel> select * from t1; + id | name +----+---------- + 1 | John Doe +(1 row) +``` + +### Non-In-Place Rebuild + +**Workflow**: +1. New Pod (e.g. 'pg-cluster-postgresql-2') is created +2. Data is synchronized from primary +3. 
Original Pod is terminated after new replica is ready
+
+Rebuild the replica by creating a new instance:
+
+```yaml
+apiVersion: operations.kubeblocks.io/v1alpha1
+kind: OpsRequest
+metadata:
+  name: pg-rebuild-replica-non-inplace
+  namespace: demo
+spec:
+  clusterName: pg-cluster
+  force: true
+  preConditionDeadlineSeconds: 0
+  rebuildFrom:
+  - componentName: postgresql
+    inPlace: false
+    instances:
+    - name: pg-cluster-postgresql-0
+  type: RebuildInstance
+```
+
+In this configuration, "pg-cluster-postgresql-0" refers to the instance name (Pod name) that will be repaired.
+
+Monitor the rebuild operation:
+```bash
+kubectl get ops pg-rebuild-replica-non-inplace -n demo -w
+```
+Example Output:
+```bash
+NAME                             TYPE              CLUSTER      STATUS    PROGRESS   AGE
+pg-rebuild-replica-non-inplace   RebuildInstance   pg-cluster   Running   0/1        5s
+pg-rebuild-replica-non-inplace   RebuildInstance   pg-cluster   Running   0/1        5s
+pg-rebuild-replica-non-inplace   RebuildInstance   pg-cluster   Running   0/1        46s
+pg-rebuild-replica-non-inplace   RebuildInstance   pg-cluster   Running   1/1        46s
+pg-rebuild-replica-non-inplace   RebuildInstance   pg-cluster   Succeed   1/1        47s
+```
+
+```bash
+kubectl get pods -l app.kubernetes.io/instance=pg-cluster -n demo -w
+NAME                      READY   STATUS    RESTARTS   AGE
+pg-cluster-postgresql-0   4/4     Running   0          53m
+pg-cluster-postgresql-1   4/4     Running   0          2m52s
+pg-cluster-postgresql-2   0/4     Pending   0          0s
+pg-cluster-postgresql-2   0/4     Pending   0          4s
+pg-cluster-postgresql-2   0/4     Init:0/4   0          4s
+pg-cluster-postgresql-2   0/4     Init:1/4   0          5s
+pg-cluster-postgresql-2   0/4     Init:2/4   0          6s
+pg-cluster-postgresql-2   0/4     Init:3/4   0          7s
+pg-cluster-postgresql-2   0/4     PodInitializing   0          8s
+pg-cluster-postgresql-2   2/4     Running           0          9s
+pg-cluster-postgresql-2   2/4     Running           0          12s
+pg-cluster-postgresql-2   2/4     Running           0          14s
+pg-cluster-postgresql-2   3/4     Running           0          14s
+pg-cluster-postgresql-2   3/4     Running           0          16s
+pg-cluster-postgresql-2   4/4     Running           0          3m30s
+pg-cluster-postgresql-0   4/4     Terminating       0          4m3s
+pg-cluster-postgresql-0   4/4     Terminating       0          4m3s 
+pg-cluster-postgresql-0 4/4 Terminating 0 4m3s +``` + +Connect to the new replica instance ('pg-cluster-postgresql-2') and verify the data: + +```bash +kubectl exec -ti -n demo pg-cluster-postgresql-2 -- env PGUSER=${NAME} PGPASSWORD=${PASSWD} psql -h 127.0.0.1 +``` + +```sql +postgrel> \c test; +postgrel> select * from t1; + id | name +----+---------- + 1 | John Doe +(1 row) +``` + +### Rebuild from Backups + +This configuration below shows recovering a failed replica by restoring it from a known backup using `backupName`: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-rebuild-from-backup + namespace: demo +spec: + clusterName: pg-cluster + force: true + rebuildFrom: + - backupName: + componentName: postgresql + inPlace: true + instances: + - name: pg-cluster-postgresql-1 + type: RebuildInstance +``` +### Rebuild to Specific Node + +To rebuild the new replica on the specific node, you may use `targetNodeName`: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-rebuild-from-backup + namespace: demo +spec: + clusterName: pg-cluster + force: true + rebuildFrom: + - backupName: + componentName: postgresql + inPlace: true + instances: + - name: pg-cluster-postgresql-1 + targetNodeName: # new pod will be scheduled to the specified nod + type: RebuildInstance +``` + +## Summary +Key takeaways: +- In-Place Rebuild: Successfully rebuilt the replica and restored the deleted data. +- Non-In-Place Rebuild: Created a new replica instance and successfully restored the data. + +Both methods effectively recover the replica and ensure data consistency. 
diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/_category_.yml new file mode 100644 index 00000000..5c4d52e0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/04-operations/_category_.yml @@ -0,0 +1,4 @@ +position: 4 +label: Operations +collapsible: true +collapsed: false \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/01-create-backuprepo.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/01-create-backuprepo.mdx new file mode 100644 index 00000000..1cfd43c9 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/01-create-backuprepo.mdx @@ -0,0 +1,163 @@ +--- +title: Create a Backup Repository for KubeBlocks +description: Learn how to create and configure a BackupRepo for KubeBlocks using an S3 bucket to store backup data. +keywords: [KubeBlocks, Backup, BackupRepo, S3, Kubernetes] +sidebar_position: 1 +sidebar_label: Create BackupRepo +--- + +# Create a BackupRepo for KubeBlocks + +This guide walks you through creating and configuring a BackupRepo in KubeBlocks using an S3 bucket for storing backup data. + +## Prerequisites +- AWS CLI configured with appropriate permissions to create S3 buckets. +- kubectl access to your Kubernetes cluster. +- KubeBlocks installed ([installation guide](../user_docs/overview/install-kubeblocks)) and running in the kb-system namespace. + +## Step 1: Create S3 Bucket + +Use the AWS CLI to create an S3 bucket in your desired region. Replace `` with your target AWS region (e.g., `us-east-1`, `ap-southeast-1`). 
+ +```bash + aws s3api create-bucket --bucket kubeblocks-backup-repo --region --create-bucket-configuration LocationConstraint= +``` + +Example (for us-west-1): +```bash +aws s3api create-bucket \ + --bucket kubeblocks-backup-repo \ + --region us-west-1 \ + --create-bucket-configuration LocationConstraint=us-west-1 +``` + +Example Output: + +```json +{ +"Location": "http://kubeblocks-backup-repo.s3.amazonaws.com/" +} +``` + +Verification: +Confirm the bucket was created by listing its contents (it will be empty initially): + +```bash +aws s3 ls s3://kubeblocks-backup-repo +``` + +## Step 2: Create a Kubernetes Secret for AWS Credentials + +Store your AWS credentials securely in a Kubernetes Secret. Replace `` and `` with your actual AWS credentials: + +```bash +# Create a secret to save the access key +kubectl create secret generic s3-credential-for-backuprepo \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= \ + -n kb-system +``` + +## Step 3: Configure Backup Repository + +A BackupRepo is a custom resource that defines a storage repository for backups. In this step, you'll integrate your S3 bucket with KubeBlocks by creating a BackupRepo resource. + +Apply the following YAML to create the BackupRepo. Replace fields(e.g., bucket name, region) with your specific settings. + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupRepo +metadata: + name: s3-repo + annotations: + # mark this backuprepo as default one + dataprotection.kubeblocks.io/is-default-repo: 'true' +spec: + # Currently, KubeBlocks supports configuring various object storage services as backup repositories + # - s3 (Amazon Simple Storage Service) + # - oss (Alibaba Cloud Object Storage Service) + # - cos (Tencent Cloud Object Storage) + # - gcs (Google Cloud Storage) + # - obs (Huawei Cloud Object Storage) + # - minio, and other S3-compatible services. + storageProviderRef: s3 + # Specifies the access method of the backup repository. 
+ # - Tool + # - Mount + accessMethod: Tool + # Specifies reclaim policy of the PV created by this backup repository. + pvReclaimPolicy: Retain + # Specifies the capacity of the PVC created by this backup repository. + volumeCapacity: 100Gi + # Stores the non-secret configuration parameters for the StorageProvider. + config: + bucket: kubeblocks-backup-repo + endpoint: '' + mountOptions: --memory-limit 1000 --dir-mode 0777 --file-mode 0666 + region: us-west-1 + # References to the secret that holds the credentials for the StorageProvider. + credential: + # name is unique within a namespace to reference a secret resource. + name: s3-credential-for-backuprepo + # namespace defines the space within which the secret name must be unique. + namespace: kb-system +``` + +:::note + +The `dataprotection.kubeblocks.io/is-default-repo: 'true'` annotation marks this BackupRepo as the default one. When creating backups, if no specific BackupRepo is specified, KubeBlocks will use the one marked as default. + +::: + +## Step 4: Verify Backup Repository Status + +Check the status of the BackupRepo to ensure it is correctly initialized: + +```bash +kubectl get backuprepo s3-repo -w +``` + +Expected Status Flow: +```bash +NAME STATUS STORAGEPROVIDER ACCESSMETHOD DEFAULT AGE +s3-repo PreChecking s3 Tool true 5s +s3-repo Ready s3 Tool true 35s +``` + +**Troubleshooting** + +When creating a new BackupRepo, KubeBlocks runs a precheck job to test the connection and read/write capabilities. If the BackupRepo status shows `Failed`, follow these troubleshooting steps: + +1. Check the BackupRepo status and error details: + ```bash + kubectl describe backuprepo + ``` + +2. Verify your configuration: + - Confirm the bucket name and region match your S3 settings + - Double-check that the AWS credentials in the Secret are valid + - Ensure there is network connectivity between KubeBlocks and AWS S3 + +The precheck job must complete successfully for the BackupRepo to become `Ready` for use. 
+ + +## How to configure a BackupRepo for other storage providers + +KubeBlocks supports the following storage providers as backup repositories: + +| Storage Provider | Description | +|-----------------|-------------| +| OSS | Alibaba Cloud Object Storage Service | +| S3 | Amazon Simple Storage Service | +| COS | Tencent Cloud Object Storage | +| GCS | Google Cloud Storage | +| OBS | Huawei Cloud Object Storage | +| MinIO | Self-hosted Object Storage | +| S3-compatible | Other S3-compatible storage services | + +To get the full list of `StorageProvider`s installed: +```bash +kubectl get storageproviders.dataprotection.kubeblocks.io +``` + +For detailed instructions on configuring a BackupRepo for other storage providers, please refer to the [BackupRepo Introduction](../../user_docs/backup-restore/backuprepo). \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/02-create-full-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/02-create-full-backup.mdx new file mode 100644 index 00000000..923943f3 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/02-create-full-backup.mdx @@ -0,0 +1,261 @@ +--- +title: Create a Full Backup for a PostgreSQL Cluster on KubeBlocks +description: Step-by-step guide to creating and validating full backups for PostgreSQL clusters using Backup API and OpsRequest API in KubeBlocks. 
+keywords: [PostgreSQL, Full Backup, KubeBlocks, Kubernetes, Database Backup, XtraBackup] +sidebar_position: 2 +sidebar_label: Create Full Backup +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Create a Full Backup for PostgreSQL on KubeBlocks + +This guide demonstrates how to create and validate full backups for PostgreSQL clusters on KubeBlocks using the `pg-basebackup` method through both: +- The Backup API (direct backup operations) +- The OpsRequest API (managed backup operations with enhanced monitoring) + +We will cover how to restore data from a backup in the [Restore From Full Backup](./05-restoring-from-full-backup) guide. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a PostgreSQL Cluster + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## Backup Prerequisites + +Before creating backups, ensure: +1. Backup repository is configured: + - `BackupRepo` resource exists + - Network connectivity between cluster and repository + - `BackupRepo` status shows "Ready" + +2. Cluster is ready: + - Cluster status is "Running" + - No ongoing operations (scaling, upgrades, etc.) + +## Identify Backup Configuration + +Check available backup policies and schedules: + +```bash +# List backup policies +kubectl get backuppolicy -n demo -l app.kubernetes.io/instance=pg-cluster + +# List backup schedules +kubectl get backupschedule -n demo -l app.kubernetes.io/instance=pg-cluster +``` + +Expected Output: +```bash +NAME BACKUP-REPO STATUS AGE +pg-cluster-postgresql-backup-policy Available 58m + +NAME STATUS AGE +pg-cluster-postgresql-backup-schedule Available 60m +``` +### BackupPolicy + +`BackupPolicy` defines a list of backup methods and their configurations. 
KubeBlocks automatically generates a `BackupPolicy` for each database cluster if it supports backup (`BackupPolicyTemplate` is defined). + +:::tip +To view the list of BackupPolicyTemplate, you can run the following command: +```bash +kubectl get backuppolicytemplate -n demo -l app.kubernetes.io/name=postgresql +``` +::: + +View supported backup methods in the BackupPolicy CR `pg-cluster-postgresql-backup-policy`: + +```bash +kubectl get backuppolicy pg-cluster-postgresql-backup-policy -n demo -oyaml | yq '.spec.backupMethods[] | .actionSetName + ", " + .name' +``` + +Example Output: + +| ActionSet Name | Method Name | +|-------------------------------|--------------------| +| postgresql-basebackup | pg-basebackup | +| null | volume-snapshot | +| postgresql-wal-g | wal-g | +| postgres-wal-g-incremental | wal-g-incremental | +| postgresql-for-pitr | archive-wal | +| postgres-wal-g-pitr | wal-g-archive | + + +- `ActionSetName` refers to the name of the `ActionSet` object that defines the backup and restore actions. If `ActionSetName` is `null`, it means the backup method does not require an `ActionSet` +- `Method Name` is simply a name of the backup method given by users or defined in the `BackupPolicyTemplate`. It will be referenced by the `BackupSchedule` resource. 
+
+:::tip
+
+To check the definition of the `ActionSet` object, you can run the following command:
+```bash
+kubectl get actionset postgresql-wal-g -oyaml # where postgresql-wal-g is the ActionSetName
+```
+:::
+
+
+**List of Backup methods**
+
+KubeBlocks PostgreSQL supports these backup methods:
+
+| Feature           | Method          | Description |
+|-------------------|-----------------|-------------|
+| Full Backup       | pg-basebackup   | Uses `pg_basebackup`, a PostgreSQL utility to create a base backup |
+| Full Backup       | wal-g           | Uses `wal-g` to create a full backup (requires WAL-G configuration) |
+| Continuous Backup | archive-wal     | Uploads PostgreSQL Write-Ahead Logging (WAL) files periodically to the backup repository, usually paired with `pg-basebackup`|
+| Continuous Backup | wal-g-archive   | Uploads PostgreSQL Write-Ahead Logging (WAL) files periodically to the backup repository, usually paired with `wal-g`|
+
+:::note
+
+- It is recommended to pair `pg-basebackup` with `archive-wal`, and pair `wal-g` with `wal-g-archive`.
+- Method `pg-basebackup` can be enabled alone to create a full backup.
+- Method `wal-g` cannot be enabled alone. It must be paired with `wal-g-archive`.
+
+:::
+
+### BackupSchedule
+
+`BackupSchedule` defines a schedule for backups. It references the `BackupPolicy` resource and the `BackupMethod` name.
+
+View the `BackupSchedule` resource `pg-cluster-postgresql-backup-schedule`:
+
+```bash
+kubectl get backupschedule pg-cluster-postgresql-backup-schedule -n demo -oyaml | yq '.spec.schedules[] | .backupMethod + "," + .enabled'
+```
+
+Example Output:
+
+| Backup Method        | Enabled |
+|----------------------|---------|
+| pg-basebackup        | false   |
+| wal-g                | false   |
+| archive-wal          | false   |
+| wal-g-archive        | false   |
+
+By default, all backup methods are disabled. You can enable it by setting `enabled` to `true` on demand. As introduced previously, there are two `FULL` backup methods: `pg-basebackup` and `wal-g`. 
As `wal-g` cannot be enabled alone, we use `pg-basebackup` in this guide. + +## Step 1. Create On-Demand Backup (using pg-basebackup) +Backup via Backup API + +### Option 1. Using Backup API + +Apply this manifest to create a backup: + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: Backup +metadata: + name: pg-cluster-pg-basebackup2 + namespace: demo +spec: + backupMethod: pg-basebackup + backupPolicyName: pg-cluster-postgresql-backup-policy + # Determines whether the backup contents stored in the backup repository should be deleted + # when the backup custom resource(CR) is deleted. Supported values are `Retain` and `Delete`. + # - `Retain` means that the backup content and its physical snapshot on backup repository are kept. + # - `Delete` means that the backup content and its physical snapshot on backup repository are deleted. + deletionPolicy: Delete +``` + +### Option 2. Using OpsRequest API + +Execute a backup using the OpsRequest API with the 'pg-basebackup' method: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-cluster-backup + namespace: demo +spec: + clusterName: pg-cluster + force: false + backup: + backupPolicyName: pg-cluster-postgresql-backup-policy + backupMethod: pg-basebackup + deletionPolicy: Delete + retentionPeriod: 1mo + type: Backup +``` + + +## Step 2. Monitor Backup and Verify Completion + +You can track the Backup progress until status shows "Completed". + +```bash +kubectl get backup pg-cluster-pg-basebackup -n demo -w +``` + +Example Output: + +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +pg-cluster-pg-basebackup pg-cluster-postgresql-backup-policy pg-basebackup Completed 4722262 10s Delete 2025-05-16T02:53:45Z 2025-05-16T02:53:55Z +``` + +## Step 3. 
Validate Backup + +Confirm successful completion by checking: +- Backup status shows "Completed" +- Backup size matches expectations +- Check files in the BackupRepo + +The `Backup` resource records details in `.status` including: +- Storage path +- Time range +- Backup file size + +## Troubleshooting + +When encountering backup issues, such as Backup status is `Failed` or stuck in `Running`for quite a long time, follow these steps to diagnose and resolve the problem: + +1. Inspect the Backup resource for any error events or status updates: + ```bash + kubectl describe backup -n demo + ``` + +2. Verify the backup job status and examine its logs: + KubeBlocks runs a Job to create a full backup. If the backup task gets stuck, you can track the Job progress: + ```bash + kubectl -n demo get job -l app.kubernetes.io/instance=pg-cluster,app.kubernetes.io/managed-by=kubeblocks-dataprotection + ``` + + And check pod logs: + ```bash + kubectl -n demo logs + ``` + + This job will be deleted when the backup completes. + +3. Review KubeBlocks controller logs for detailed error information: + + ```bash + kubectl -n kb-system logs deploy/kubeblocks -f + ``` + +## Summary + +This guide covered: +1. Deploying a replication PostgreSQL cluster +2. Creating full backups using: + - Direct Backup API + - Managed OpsRequest API +3. Monitoring and validating backups + +Your PostgreSQL data is now securely backed up and ready for restoration when needed. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/03-scheduled-full-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/03-scheduled-full-backup.mdx new file mode 100644 index 00000000..f179b4fa --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/03-scheduled-full-backup.mdx @@ -0,0 +1,222 @@ +--- +title: Setting Up a PostgreSQL Cluster with Scheduled Backups in KubeBlocks +description: Learn how to deploy a PostgreSQL cluster using KubeBlocks and configure automated scheduled backups with retention in an S3 repository. +keywords: [PostgreSQL, Backup, KubeBlocks, Scheduled Backup, Kubernetes] +sidebar_position: 3 +sidebar_label: Scheduled Backups +--- + + +# Setting Up a PostgreSQL Cluster with Scheduled Backups in KubeBlocks + +This guide demonstrates how to deploy a PostgreSQL cluster using KubeBlocks and configure scheduled backups with retention in an S3 repository. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a PostgreSQL Cluster + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## Prerequisites for Backup + +1. Backup Repository Configured: + - Configured `BackupRepo` + - Network connectivity between cluster and repo, `BackupRepo` status is `Ready` + +2. Cluster is Running: + - Cluster must be in `Running` state + - No ongoing operations (scaling, upgrades etc.) + +## Configure Scheduled Backups (using pg-basebackup and archive-wal) + +`BackupSchedule` defines a schedule for backups. It references the `BackupPolicy` resource and the `BackupMethod` name. + +KubeBlocks automatically creates a `BackupSchedule` resource when the cluster is created. 
Follow these steps to enable and configure scheduled backups:
+
+### View default backup schedule configuration:
+
+```bash
+kubectl get backupschedule pg-cluster-postgresql-backup-schedule -n demo -oyaml
+```
+
+Example Output:
+```yaml
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: BackupSchedule
+spec:
+  backupPolicyName: pg-cluster-postgresql-backup-policy
+  schedules:
+  - backupMethod: pg-basebackup
+    # ┌───────────── minute (0-59)
+    # │ ┌───────────── hour (0-23)
+    # │ │ ┌───────────── day of month (1-31)
+    # │ │ │ ┌───────────── month (1-12)
+    # │ │ │ │ ┌───────────── day of week (0-6) (Sunday=0)
+    # │ │ │ │ │
+    # 0 18 * * *
+    # schedule this job every day at 6:00 PM (18:00).
+    cronExpression: 0 18 * * * # update the cronExpression to your needs
+    enabled: false # set to `true` to schedule base backup periodically
+    retentionPeriod: 7d # set the retention period to your needs
+```
+
+To check if a backup method is enabled, you can run the following command:
+
+```bash
+kubectl get backupschedule pg-cluster-postgresql-backup-schedule -n demo -oyaml | yq '.spec.schedules[] | .backupMethod + "," + .enabled'
+```
+
+Example Output:
+
+| Backup Method        | Enabled |
+|----------------------|---------|
+| pg-basebackup        | false   |
+| wal-g                | false   |
+| archive-wal          | false   |
+| wal-g-archive        | false   |
+
+By default, all backup methods are disabled. You can enable a method by setting `enabled` to `true` on demand. As introduced previously, there are two `FULL` backup methods: `pg-basebackup` and `wal-g`. As `wal-g` cannot be enabled alone, we use `pg-basebackup` in this guide.
+
+There are two ways to enable a backup method and we will introduce them in the following sections.
+
+### Option 1. 
Edit the BackupSchedule resource + +Enable and customize the backup schedule: +```bash +kubectl edit backupschedule pg-cluster-postgresql-backup-schedule -n demo +``` + +Update these key parameters: +- `enabled`: Set to `true` to activate scheduled backups +- `cronExpression`: Configure backup frequency using cron syntax +- `retentionPeriod`: Set how long to keep backups (e.g., `7d`, `1mo`) + +Example configuration for daily backups at 6PM UTC with 7-day retention: +```yaml +schedules: +- backupMethod: pg-basebackup + enabled: true + cronExpression: "0 18 * * *" + retentionPeriod: 7d +``` + +### Option 2. Edit the Cluster resource with backup method + +Or you can patch an existing cluster to enable scheduled backup: + +```bash +kubectl patch cluster pg-cluster -n demo --type='merge' -p=' +{ + "spec": { + "backup": { + "retentionPeriod": "7d", + "method": "pg-basebackup", + "enabled": true, + "incrementalBackupEnabled": false, + "pitrEnabled": false, + "cronExpression": "0 18 * * *", + "repoName": "s3-repo" + } + } +}' +``` + +## Verify the configuration + +### View schedule configuration +```bash +# Check schedule status +kubectl get backupschedule pg-cluster-postgresql-backup-schedule -n demo -oyaml | yq '.spec.schedules[] | .backupMethod + "," + .enabled' +``` + +Example Output: + +| Backup Method | Enabled | +|----------------------|---------| +| pg-basebackup | true | +| wal-g | false | +| archive-wal | false | +| wal-g-archive | false | + +Only `pg-basebackup` is enabled. + +### Check CronJob + +KubeBlocks create a CronJob to schedule full backups. 
You can check the CronJob status: + +```bash +kubectl get cronjob -n demo -l app.kubernetes.io/instance=pg-cluster,app.kubernetes.io/managed-by=kubeblocks-dataprotection +``` + +Example Output: + +```bash +NAME SCHEDULE TIMEZONE SUSPEND ACTIVE LAST SCHEDULE AGE +b387c27b-pg-cluster-postgresql-pg-basebackup 0 18 * * * UTC False 0 12h 3d5h +``` + +If there is no CronJob, please check the `BackupSchedule` resource the full backup method `pg-basebackup` is enabled. + +## Monitoring and Managing Backups + +After enabling scheduled backups, monitor their execution and manage backup retention: + +1. View all backups: +```bash +kubectl get backup -n demo -l app.kubernetes.io/instance=pg-cluster +``` + +2. Inspect backup details: + +```bash +kubectl describe backup -n demo +``` + +3. Verify backup artifacts: +- Status should show "Completed" +- Check backup size matches expectations +- Confirm retention period is being applied +- Validate backup files exist in repository + +4. Manage backup retention: +- To manually delete old backups: +```bash +kubectl delete backup -n demo +``` +- To modify retention period: +```bash +kubectl edit backupschedule pg-cluster-postgresql-backup-schedule -n demo +``` + +## Cleanup +To remove all created resources, delete the PostgreSQL cluster along with its namespace: + +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +``` + +## Summary + +This guide demonstrated: +1. Configuration of automated PostgreSQL backups +2. Schedule customization using cron syntax +3. Retention policy management +4. 
Backup verification procedures + +Your PostgreSQL cluster now has: +- Regular automated backups +- Configurable retention policies +- Complete backup history tracking diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/04-scheduled-continuous-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/04-scheduled-continuous-backup.mdx new file mode 100644 index 00000000..d2f20896 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/04-scheduled-continuous-backup.mdx @@ -0,0 +1,424 @@ +--- +title: Setting Up a PostgreSQL Cluster with Scheduled Continuous Backup in KubeBlocks +description: Learn how to set up a PostgreSQL cluster with scheduled full backups and continuous incremental backups enabled in KubeBlocks. +keywords: [PostgreSQL, Backup, PITR, KubeBlocks, Kubernetes] +sidebar_position: 4 +sidebar_label: Scheduled Continuous Backup +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Setting Up a PostgreSQL Cluster with Scheduled Continuous Backup Enabled in KubeBlocks + +This guide demonstrates how to configure a PostgreSQL cluster on KubeBlocks with: + +- Scheduled full backups (base backups) +- Continuous WAL (Write-Ahead Log) archiving +- Point-In-Time Recovery (PITR) capabilities + +This combination provides comprehensive data protection with minimal recovery point objectives (RPO). + +## What is PITR? +Point-In-Time Recovery (PITR) allows you to restore a database to a specific moment in time by combining full backups with continuous binlog/wal/archive log backups. + +For details on restoring data from both full backups and continuous binlog backups, refer to the [Restore From PITR](restore-with-pitr.mdx) guide. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. 
+ - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Prerequisites for Backup + +1. Backup Repository Configured: + - Configured `BackupRepo` + - Network connectivity between cluster and repo, `BackupRepo` status is `Ready` + +2. Cluster is Running: + - Cluster must be in `Running` state + - No ongoing operations (scaling, upgrades etc.) + +3. Check the list of backup methods. + + As introduced in the [Create Full Backup](./02-create-full-backup) guide, KubeBlocks PostgreSQL supports these backup methods: + + | Feature | Method | Description | + |-------------------|-----------------|-------------| + | Full Backup | pg-basebackup | Uses `pg_basebackup`, a PostgreSQL utility to create a base backup | + | Full Backup | wal-g | Uses `wal-g` to create a full backup (requires WAL-G configuration) | + | Continuous Backup | archive-wal | Uploads PostgreSQL Write-Ahead Logging (WAL) files periodically to the backup repository, usually paired with `pg-basebackup`| + | Continuous Backup | wal-g-archive | Uploads PostgreSQL Write-Ahead Logging (WAL) files periodically to the backup repository, usually paired with `wal-g`| + + :::note + + - It is recommended to pair `pg-basebackup` with `archive-wal`, and pair `wal-g` with `wal-g-archive`. + - Method `pg-basebackup` can be enabled alone to create a full backup. + - Method `wal-g` cannot be enabled alone. It must be paired with `wal-g-archive`. + + ::: + In this guide, we will show how to deploy a PostgreSQL cluster with different combinations of backup methods. You may choose one of the following combinations. 
+  - pg-basebackup and archive-wal
+  - wal-g and wal-g-archive (**Recommended**)
+
+
+## Deploy a PostgreSQL Cluster, with pg-basebackup and archive-wal
+
+Deploy a 2-node PostgreSQL replication cluster (1 primary, 1 secondary) and specify backup information.
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: pg-cluster
+  namespace: demo
+spec:
+  terminationPolicy: Delete
+  clusterDef: postgresql
+  topology: replication
+  componentSpecs:
+    - name: postgresql
+      serviceVersion: 16.4.0
+      disableExporter: true
+      replicas: 2
+      resources:
+        limits:
+          cpu: "0.5"
+          memory: "0.5Gi"
+        requests:
+          cpu: "0.5"
+          memory: "0.5Gi"
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 20Gi
+  backup:
+    retentionPeriod: 7d
+    # for full backup
+    method: pg-basebackup # full backup method name
+    enabled: true
+    cronExpression: 0 18 * * * # full backup schedule
+    # for continuous backup
+    continuousMethod: archive-wal # continuous backup method
+    pitrEnabled: true # enable continuous backup method or not
+    repoName: s3-repo # specify the BackupRepo; if not specified, the BackupRepo annotated as `default` will be used.
+``` + +Or you can patch an existing cluster to enable scheduled continuous backup: +```bash +kubectl patch cluster pg-cluster -n demo --type='merge' -p=' +{ + "spec": { + "backup": { + "retentionPeriod": "7d", + "method": "pg-basebackup", + "enabled": true, + "cronExpression": "0 18 * * *", + "continuousMethod": "archive-wal", + "pitrEnabled": true, + "repoName": "s3-repo" + } + } +}' +``` + +**Key Configuration Fields Explained** + +| Field | Value | Description | +|-------|-------|-------------| +| `backup.enabled` | `true` | Enables scheduled backups | +| `method` | `pg-basebackup` | Full backup method using PostgreSQL's native utility | +| `cronExpression` | `0 18 * * *` | Daily full backup at 6PM UTC | +| `retentionPeriod` | `7d` | Retains backups for 7 days | +| `repoName` | `s3-repo` | Backup repository name (S3-compatible storage) | +| `pitrEnabled` | `true` | Enables continuous WAL archiving for PITR | +| `continuousMethod` | `archive-wal` | Method for continuous WAL archiving | + +## Verifying the Deployment + +### Monitor the Cluster Status +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster pg-cluster -n demo -w +``` + +Example Output: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +pg-cluster postgresql Delete Creating 50s +pg-cluster postgresql Delete Running 4m2s +``` + +Once the cluster status becomes Running, your PostgreSQL cluster is ready for use. + +### Check Backups are Scheduled Correctly + +Verify full backup and continuous backups are configured correctly: +1. Check full backup task is scheduled correctly. KubeBlocks create CronJob to schedule full backup. + ```bash + kubectl get cronjob -l app.kubernetes.io/instance=pg-cluster,app.kubernetes.io/managed-by=kubeblocks-dataprotection -n demo + ``` + +2. Check continuous backup is RUNNING correctly. KubeBlocks create a StatefulSet to run continuous backup. 
+ ```bash + kubectl get sts -l app.kubernetes.io/instance=pg-cluster,app.kubernetes.io/managed-by=kubeblocks-dataprotection -n demo + ``` + +3. Check continuous Backup resource is created correctly. + + ```bash + kubectl get backup -l app.kubernetes.io/instance=pg-cluster,app.kubernetes.io/managed-by=kubeblocks-dataprotection,dataprotection.kubeblocks.io/backup-type=Continuous -n demo + ``` + + Example Output: + ```bash + NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME + b387c27b-pg-cluster-postgresql-archive-wal pg-cluster-postgresql-backup-policy archive-wal Running Delete 2025-05-16T02:58:10Z + ``` + +### Verify Backup Configuration + +KubeBlocks automatically creates a `BackupSchedule` resource. Inspect the configuration: + +```bash +kubectl get backupschedule pg-cluster-postgresql-backup-schedule -n demo -oyaml +``` + +Example Output: +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupSchedule +... 
+spec:
+  backupPolicyName: pg-cluster-postgresql-backup-policy
+  schedules:
+  - backupMethod: pg-basebackup
+    cronExpression: 0 18 * * *
+    enabled: true
+    retentionPeriod: 7d
+  - backupMethod: archive-wal
+    cronExpression: '*/5 * * * *'
+    enabled: true
+    name: archive-wal
+    retentionPeriod: 7d
+```
+
+## Deploy a PostgreSQL Cluster, with wal-g and wal-g-archive
+
+Deploy a 2-node PostgreSQL replication cluster (1 primary, 1 secondary)
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: pg-cluster
+  namespace: demo
+spec:
+  terminationPolicy: Delete
+  clusterDef: postgresql
+  topology: replication
+  componentSpecs:
+    - name: postgresql
+      serviceVersion: 16.4.0
+      disableExporter: true
+      replicas: 2
+      resources:
+        limits:
+          cpu: "0.5"
+          memory: "0.5Gi"
+        requests:
+          cpu: "0.5"
+          memory: "0.5Gi"
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 20Gi
+  backup:
+    retentionPeriod: 7d
+    # for full backup
+    method: wal-g # full backup method name
+    enabled: true
+    cronExpression: 0 18 * * * # full backup schedule
+    # for continuous backup
+    continuousMethod: wal-g-archive # continuous backup method
+    pitrEnabled: true # enable continuous backup method or not
+    repoName: s3-repo # specify the BackupRepo; if not specified, the BackupRepo annotated as `default` will be used.
+```
+
+Or you can patch an existing cluster to enable scheduled continuous backup:
+
+```bash
+kubectl patch cluster pg-cluster -n demo --type='merge' -p='
+{
+  "spec": {
+    "backup": {
+      "retentionPeriod": "7d",
+      "method": "wal-g",
+      "enabled": true,
+      "cronExpression": "0 18 * * *",
+      "continuousMethod": "wal-g-archive",
+      "pitrEnabled": true,
+      "repoName": "s3-repo"
+    }
+  }
+}'
+```
+
+You may use the steps introduced in the [Verifying the Deployment](#verifying-the-deployment) section to check the deployment of the cluster.
+
+Apart from that, you need to check if `archive_command` is configured correctly.
+
+:::note
+
+Methods `wal-g` and `wal-g-archive` work only when PostgreSQL's `archive_command` is configured properly.
+
+KubeBlocks will do it automatically for you when the method `wal-g-archive` is enabled.
+
+:::
+
+KubeBlocks will create a Reconfiguration OpsRequest to set the `archive_command` to `envdir /home/postgres/pgdata/wal-g/env /home/postgres/pgdata/wal-g/wal-g wal-push %p`
+
+To monitor the progress of the reconfiguration, you can check the status of the OpsRequest:
+```bash
+kubectl get opsrequest -l app.kubernetes.io/instance=pg-cluster,operations.kubeblocks.io/ops-type=Reconfiguring -n demo
+```
+
+If the OpsRequest is `Failed` or stays `Pending` for quite a long time, you need to check the logs of the OpsRequest to see the error message. Otherwise, the `wal-g-archive` and `wal-g` methods will not work.
+
+
+## How to check the progress of continuous backup
+
+Each continuous backup method uploads WAL files to the backup repository periodically (every few seconds).
+
+:::tip
+
+If there is no WAL log to upload, the backup pod has nothing to do. In that case, the backup's total size won't increase and the backup's `timeRange` won't change.
+
+You may perform a sysbench benchmark against the cluster, or you can run the Python script [pg_benchmark.py](https://github.com/apecloud/kubeblocks-addons/blob/main/examples/postgresql/test/pg_benchmark.py) to benchmark the cluster.
+
+:::
+
+Check the progress of continuous backup by following the steps below:
+
+1. checking the backup resource.
+ + ```bash + kubectl get backup -l app.kubernetes.io/instance=pg-cluster,dataprotection.kubeblocks.io/backup-type=Continuous -n demo + ``` + + Example Output: + ```bash + NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME + b387c27b-pg-cluster-postgresql-archive-wal pg-cluster-postgresql-backup-policy archive-wal Running 104325519 Delete 2025-05-16T02:58:10Z + ``` + + - `TOTAL-SIZE` is the total size of the WAL files uploaded to the backup repository. It will increase over time. + +2. checking the backup pod logs. + + + + + + ```bash + kubectl logs b387c27b-pg-cluster-postgresql-archive-wal-0 -n demo -c backupdata + ``` + where `b387c27b-pg-cluster-postgresql-archive-wal-0` is the name of the backup pod, and `backupdata` is the container name. + + Example Output: + ```bash + 2025-05-16 06:36:12 INFO: start to upload the wal log which maybe misses + 2025-05-16 06:36:13 INFO: start to archive wal logs + 2025-05-16 06:46:14 INFO: start to switch wal file + 2025-05-16 06:56:16 INFO: start to switch wal file + 2025-05-16 07:05:18 INFO: upload 000000010000000000000004 + 2025-05-16 07:05:20 INFO: upload 000000010000000000000005 + 2025-05-16 07:06:17 INFO: upload 000000010000000000000006 + 2025-05-16 07:09:44 INFO: upload 000000010000000000000007 + 2025-05-16 07:09:46 INFO: upload 000000010000000000000008 + 2025-05-16 07:09:48 INFO: upload 000000010000000000000009 + ... + ``` + + + + + ```bash + kubectl logs b387c27b-pg-cluster-postgresql-wal-g-archive-0 -n demo -c backupdata + ``` + + Where `b387c27b-pg-cluster-postgresql-wal-g-archive-0` is the name of the backup pod, and `backupdata` is the container name. + + Example Output: + ```bash + 2025-05-16 08:16:59 INFO: start to archive and update wal infos + 2025-05-16 08:16:59 INFO: config wal-g environment variables... + 2025-05-16 08:25:18 INFO: upload 000000010000000000000022 ... 
+ INFO: 2025/09/08 08:25:18.904842 Files will be uploaded to storage: default + INFO: 2025/09/08 08:25:22.729796 FILE PATH: 000000010000000000000023.zst + INFO: 2025/09/08 08:25:22.845932 FILE PATH: 000000010000000000000022.zst + 2025-05-16 08:25:22 INFO: WAL-G upload succeeded for 000000010000000000000022 + 2025-05-16 08:25:22 INFO: upload 000000010000000000000023 ... + INFO: 2025/09/08 08:25:22.883391 Files will be uploaded to storage: default + 2025-05-16 08:25:22 INFO: WAL-G upload succeeded for 000000010000000000000023 + 2025-05-16 08:25:24 INFO: start time of the oldest wal: 2025-05-16T08:09:45Z, end time of the latest wal: 2025-05-16T08:25:17Z, total size: 14938340 + 2025-05-16 08:26:23 INFO: upload 000000010000000000000024 ... + INFO: 2025/09/08 08:26:23.166320 Files will be uploaded to storage: default + INFO: 2025/09/08 08:26:25.738435 FILE PATH: 000000010000000000000025.zst + INFO: 2025/09/08 08:26:25.784766 FILE PATH: 000000010000000000000024.zst + 2025-05-16 08:26:25 INFO: WAL-G upload succeeded for 000000010000000000000024 + 2025-05-16 08:26:25 INFO: upload 000000010000000000000025 ... + INFO: 2025/09/08 08:26:25.822544 Files will be uploaded to storage: default + 2025-05-16 08:26:25 INFO: WAL-G upload succeeded for 000000010000000000000025 + 2025-05-16 08:26:27 INFO: start time of the oldest wal: 2025-05-16T08:09:45Z, end time of the latest wal: 2025-05-16T08:26:20Z, total size: 29775523 + ``` + + + +3. checking the backup's `timeRange`, it should increase over time. + + ```bash + kubectl get backup b387c27b-pg-cluster-postgresql-archive-wal -n demo -oyaml | yq '.status.timeRange' + ``` + where `b387c27b-pg-cluster-postgresql-archive-wal` is the name of the continuous backup. + + Example Output: + ```bash + timeRange: + end: "2025-05-16T07:09:48Z" + start: "2025-05-16T06:36:12Z" + ``` + + - `timeRange.start` is the start time of the backup (earliest PostgreSQL transaction time). 
+ - `timeRange.end` is the end time of the backup (latest PostgreSQL transaction time). + +## Summary + +This guide covered: +1. Configuring scheduled full backups with pg-basebackup +2. Enabling continuous WAL archiving with wal-g-archive +3. Setting up Point-In-Time Recovery (PITR) capabilities +4. Monitoring backup operations + +Key Benefits: +- Scheduled full backups ensure regular recovery points +- Continuous WAL archiving minimizes potential data loss +- PITR enables recovery to any moment in time \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/05-restoring-from-full-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/05-restoring-from-full-backup.mdx new file mode 100644 index 00000000..8e56059c --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/05-restoring-from-full-backup.mdx @@ -0,0 +1,210 @@ +--- +title: Restore a PostgreSQL Cluster from Backup +description: Learn how to restore a new PostgreSQL cluster from an existing backup in KubeBlocks using the Cluster Annotation or OpsRequest API. +keywords: [PostgreSQL, Restore, Backup, KubeBlocks, Kubernetes] +sidebar_position: 5 +sidebar_label: Restore PostgreSQL Cluster +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Restore a PostgreSQL Cluster from Backup + +This guide demonstrates two methods to restore a PostgreSQL cluster from backup in KubeBlocks: + +1. **Cluster Annotation Method** - Simple declarative approach using YAML annotations +2. **OpsRequest API Method** - Enhanced operational control with progress monitoring + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Preparing for Restoration: Locate one Full Backup +Before restoring, ensure that there is a full backup available. The restoration process will use this backup to create a new PostgreSQL cluster. 
+ +- Backup repository accessible from new cluster +- Valid full backup in `Completed` state +- Adequate CPU/memory resources +- Sufficient storage capacity + +Find available full backups: + +```bash +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=pg-cluster # get the list of full backups +``` + +Pick ONE of the Backups whose status is `Completed`. + + +## Step 1: Initiate a Restore + +### Option 1: Restore a Cluster via Cluster Annotation + +Create a new cluster with restore configuration: + +Key parameters: +- `kubeblocks.io/restore-from-backup` annotation +- Backup name and namespace located from the previous steps + + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-restored + namespace: demo + annotations: + # NOTE: + # - replcae with the backup name + # - specify the namespace of the backup using + kubeblocks.io/restore-from-backup: '{"postgresql":{"name":"","namespace":"","volumeRestorePolicy":"Parallel"}}' +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +The json string in the annotation is of structure: +```json +{ + "postgresql": { + "name": "", + "namespace": "", + "volumeRestorePolicy": "Parallel" + } +} +``` +- `postgresql`: the component name in the cluster (check `cluster.spec.componentSpecs[].name`) +- `name`: the full backup name +- `namespace`: the namespace of the backup +- `volumeRestorePolicy`: the volume restore policy, `Parallel` or `Serial` + +### Option 2: Restore a Cluster via Restore OpsRequest + +Create a `Restore` OpsRequest: + +```yaml +apiVersion: 
operations.kubeblocks.io/v1alpha1
+kind: OpsRequest
+metadata:
+  name: pg-restore-ops
+  namespace: demo
+spec:
+  clusterName: pg-restored # restored cluster name
+  restore:
+    backupName: <full-backup-name> # replace it with your full backup name
+    backupNamespace: <backup-namespace> # replace it with your backup namespace
+  type: Restore
+```
+
+## Step 2: Monitor Restoration
+
+1. Check component events:
+
+   ```bash
+   # describe component postgresql
+   kubectl describe cmp pg-restored-postgresql -n demo
+   ```
+
+   It will show the following events. When all the restore tasks are completed, the component will be in `Running` state.
+   ```bash
+   Events:
+     Type     Reason                    Age                    From                  Message
+     ----     ------                    ----                   ----                  -------
+     Warning  Warning                   2m28s                  component-controller  config/script template has no template specified: postgresql-configuration
+     Normal   NeedWaiting               2m19s (x7 over 2m28s)  component-controller  waiting for restore "pg-restored-postgresql-53bf2e93-preparedata" successfully
+     Normal   Unknown                   2m19s                  component-controller  the component phase is unknown
+     Normal   ComponentPhaseTransition  2m19s (x2 over 2m19s)  component-controller  component is Creating
+     Normal   Unavailable               2m19s (x2 over 2m19s)  component-controller  the component phase is Creating
+     Normal   ComponentPhaseTransition  119s                   component-controller  component is Running
+     Normal   Available                 119s                   component-controller  the component phase is Running
+     Normal   NeedWaiting               119s                   component-controller  waiting for restore "pg-restored-postgresql-53bf2e93-postready" successfully
+   ```
+
+2. Check restore status:
+
+   ```bash
+   # Watch restore status
+   kubectl get restore -n demo
+   ```
+
+   There will be two restore resources created, one is for the data preparation, and the other is for the post-ready tasks.
+ ```bash + NAME BACKUP RESTORE-TIME STATUS DURATION CREATION-TIME COMPLETION-TIME + pg-restored-postgresql-5e9dd0bd-postready pg-cluster-pg-basebackup Completed 1s 2025-05-16T07:32:11Z 2025-05-16T07:32:11Z + pg-restored-postgresql-5e9dd0bd-preparedata pg-cluster-pg-basebackup Completed 9s 2025-05-16T07:31:42Z 2025-05-16T07:31:51Z + ``` + +## Troubleshooting + +If the restoration is stuck, you can check the status + +1. Check the status of the OpsRequest if any + ```bash + kubectl get opsrequest pg-restore-ops -n demo + ``` + +2. Describe the component, check if there is any error message + ```bash + kubectl describe cmp pg-restored-postgresql -n demo + ``` + +3. Describe restore resource + ```bash + kubectl describe restore -n demo + ``` + It will show the status of restore and the Job created if any, for example: + ```text + Normal CreateRestoreJob 44m restore-controller created job demo/restore-preparedata-cbdbbf60-backup-demo-pg-cluster + ``` + +4. Check restore jobs and its logs + ```bash + kubectl logs -n demo # job name found in previous step + ``` + +## Cleanup +To remove all created resources, delete the PostgreSQL cluster along with its namespace: + +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete cluster pg-restored -n demo +kubectl delete ns demo +``` + +## Summary + +This guide covered two restoration methods: + +1. **Cluster Annotation** - Simple YAML-based approach + - Create cluster with restore annotation + - Monitor progress + +2. 
**OpsRequest API** - Enhanced operational control + - Create restore request + - Track operation status + - Verify completion diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/06-restore-with-pitr.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/06-restore-with-pitr.mdx new file mode 100644 index 00000000..cd0dca63 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/06-restore-with-pitr.mdx @@ -0,0 +1,245 @@ +--- +title: Restore a PostgreSQL Cluster from Backup with Point-In-Time-Recovery(PITR) on KubeBlocks +description: Learn how to restore a PostgreSQL cluster using a full backup and continuous binlog backup for Point-In-Time Recovery (PITR) on KubeBlocks. +keywords: [PostgreSQL, Full Backup, PITR, KubeBlocks] +sidebar_position: 6 +sidebar_label: Restore with PITR +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Restore a PostgreSQL Cluster from Backup with Point-In-Time-Recovery(PITR) on KubeBlocks + +This guide demonstrates how to perform Point-In-Time Recovery (PITR) for PostgreSQL clusters in KubeBlocks using: + +1. A full base backup +2. Continuous WAL (Write-Ahead Log) backups +3. Two restoration methods: + - Cluster Annotation (declarative approach) + - OpsRequest API (operational control) + +PITR enables recovery to any moment within the `timeRange` specified. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Prepare for PITR Restoration +To perform a PITR restoration, both a full backup and continuous backup are required. Refer to the documentation to configure these backups if they are not already set up. + +- Completed full backup +- Active continuous WAL backup +- Backup repository accessible +- Sufficient resources for new cluster + +To identify the list of full and continuous backups, you may follow the steps: + +### 1. 
Verify Continuous Backup +Confirm you have a continuous WAL backup, either running or completed: + +```bash +# expect EXACTLY ONE continuous backup per cluster +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Continuous,app.kubernetes.io/instance=pg-cluster +``` + +### 2. Check Backup Time Range +Get the valid recovery window: + +```bash +kubectl get backup -n demo -o yaml | yq '.status.timeRange' +``` + +Expected Output: +```text +start: "2025-05-07T09:12:47Z" +end: "2025-05-07T09:22:50Z" +``` + +### 3. Identify Full Backup +Find available full backups that meet: +- Status: Completed +- Completion time **AFTER** continuous backup start time + +```bash +# expect one or more Full backups +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=pg-cluster +``` + +:::tip +KubeBlocks automatically selects the **most recent** qualifying full backup as the base. + +Make sure there is a full backup meets the condition: its `stopTime`/`completionTimestamp` must **AFTER** Continuous backup's `startTime`, otherwise PITR restoration will fail. +::: + +## Restore a Cluster from Continuous Backup + +### Option 1: Restore a Cluster via Cluster Annotation + +Configure PITR parameters in cluster annotation: + +Key parameters: +- `name`: Continuous backup name +- `restoreTime`: Target recovery time (within backup `timeRange`) + +Apply this YAML configuration: +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-restore-pitr + namespace: demo + annotations: + # NOTE: + # 1. replace with the continuouse backup name + # 2. replace with a valid time within the backup timeRange. + # 3. 
replace with the namespace of the backup + kubeblocks.io/restore-from-backup: '{"postgresql":{"name":"","namespace":"","restoreTime":"","volumeRestorePolicy":"Parallel"}}' +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: "16.4.0" + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +The json string in the annotation is of structure: +```json +{ + "postgresql": { + "name": "", + "namespace": "", + "restoreTime": "", + "volumeRestorePolicy": "Parallel" + } +} +``` +- `postgresql`: the component name in the cluster (check `cluster.spec.componentSpecs[].name`) +- `name`: the continuous backup name +- `namespace`: the namespace of the backup +- `restoreTime`: the restore time, must be within the continuous backup `timeRange` +- `volumeRestorePolicy`: the volume restore policy, `Parallel` or `Serial` + +### Option 2: Restore a Cluster via Restore OpsRequest + +Create a `Restore` OpsRequest: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-restore-pitr-ops + namespace: demo +spec: + clusterName: pg-restore-pitr # restored cluster name + restore: + backupName: # replace it with your continuous backup name + backupNamespace: # replace it with the namespace of the backup + restorePointInTime: # replace it with a valid time within the backup timeRange, e.g. 2025-09-03T12:34:56Z + type: Restore +``` + +## Step 2: Monitor Restoration + +1. Check component events: + + ```bash + # describe component postgresql + kubectl describe cmp pg-restore-pitr-postgresql -n demo + ``` + + It will show the following events. When all the restore tasks are completed, the component will be in `Running` state. 
+ ```bash + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning Warning 6m51s component-controller config/script template has no template specified: postgresql-configuration + Normal NeedWaiting 6m44s (x7 over 6m51s) component-controller waiting for restore "pg-restore-pitr-postgresql-a6b02251-preparedata" successfully + Normal Unknown 6m31s component-controller the component phase is unknown + Normal ComponentPhaseTransition 6m30s (x4 over 6m31s) component-controller component is Creating + Normal Unavailable 6m30s (x4 over 6m31s) component-controller the component phase is Creating + Normal ComponentPhaseTransition 6m component-controller component is Running + Normal Available 6m component-controller the component phase is Running + Normal NeedWaiting 6m (x3 over 6m) component-controller waiting for restore "pg-restore-pitr-postgresql-a6b02251-postready" successfully + ``` + +2. Check restore status: + + ```bash + # Watch restore status + kubectl get restore -n demo + ``` + + There will be two restore resources created, one is for the data preparation, and the other is for the post-ready tasks. + ```bash + NAME BACKUP RESTORE-TIME STATUS DURATION CREATION-TIME COMPLETION-TIME + pg-restore-pitr-postgresql-a6b02251-postready b387c27b-pg-cluster-postgresql-archive-wal 2025-05-16T08:03:50Z Completed 4s 2025-05-16T08:07:57Z 2025-05-16T08:08:00Z + pg-restore-pitr-postgresql-a6b02251-preparedata b387c27b-pg-cluster-postgresql-archive-wal 2025-05-16T08:03:50Z Completed 21s 2025-05-16T08:07:06Z 2025-05-16T08:07:26Z + ``` + +## Troubleshooting + +When encountering backup issues, such as Backup status is `Failed` or stuck in `Running` for quite a long time, follow these steps to diagnose and resolve the problem: + +1. Inspect the Backup resource for any error events or status updates: + ```bash + kubectl describe backup -n demo + ``` + +2. Verify the backup job status and examine its logs: + KubeBlocks runs a Job to create a full backup. 
If the backup task gets stuck, you can track the Job progress: + ```bash + kubectl -n demo get job -l app.kubernetes.io/instance=pg-cluster,app.kubernetes.io/managed-by=kubeblocks-dataprotection + ``` + + And check pod logs: + ```bash + kubectl -n demo logs + ``` + + This job will be deleted when the backup completes. + +3. Review KubeBlocks controller logs for detailed error information: + + ```bash + kubectl -n kb-system logs deploy/kubeblocks -f + ``` + +## Cleanup +To remove all created resources, delete the PostgreSQL cluster along with its namespace: + +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete cluster pg-restore-pitr -n demo +kubectl delete ns demo +``` + +## Summary +This guide demonstrated how to restore a PostgreSQL cluster in KubeBlocks using a full backup and continuous backup for Point-In-Time Recovery (PITR). Key steps included: +- Verifying available backups. +- Creating a new PostgreSQL cluster with restoration configuration. +- Monitoring the restoration process. + +With this approach, you can restore a PostgreSQL cluster to a specific point in time, ensuring minimal data loss and operational continuity. 
+ diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/_category_.yml new file mode 100644 index 00000000..cd4faeaf --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/05-backup-restore/_category_.yml @@ -0,0 +1,4 @@ +position: 5 +label: Backup And Restore +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/06-custom-secret/01-custom-secret.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/06-custom-secret/01-custom-secret.mdx new file mode 100644 index 00000000..20e5376d --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/06-custom-secret/01-custom-secret.mdx @@ -0,0 +1,139 @@ +--- +title: Create a PostgreSQL Cluster with a Custom Root Password on KubeBlocks +description: Learn how to deploy a PostgreSQL cluster on KubeBlocks with a custom root password securely configured using Kubernetes Secrets. +keywords: [PostgreSQL, KubeBlocks, Custom Password, Kubernetes, Secrets] +sidebar_position: 1 +sidebar_label: Custom Password +--- + +# Create PostgreSQL Cluster With Custom Password on KubeBlocks + +This guide demonstrates how to deploy a PostgreSQL cluster in KubeBlocks with a custom root password stored in a Kubernetes Secret. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploying the PostgreSQL Replication Cluster + +KubeBlocks uses a declarative approach for managing PostgreSQL clusters. Below is an example configuration for deploying a PostgreSQL cluster with 2 nodes (1 primary, 1 replica) and a custom root password. + +### Step 1: Create a Secret for the Root Account + +The custom root password is stored in a Kubernetes Secret. 
Create the Secret by applying the following YAML: + +```yaml +apiVersion: v1 +data: + password: Y3VzdG9tcGFzc3dvcmQ= # custompassword + username: cm9vdA== #root +immutable: true +kind: Secret +metadata: + name: custom-pg-secret + namespace: demo +``` +- password: Replace custompassword with your desired password and encode it using Base64 (`echo -n "custompassword" | base64`). +- username: The default PostgreSQL postgres user is 'root', encoded as 'cm9vdA=='. + + +### Step 2: Deploy the PostgreSQL Cluster + +Apply the following manifest to deploy the PostgreSQL cluster, referencing the Secret created in Step 1 for the root account: +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + disableExporter: true + replicas: 2 + systemAccounts: + - name: postgres + secretRef: + name: custom-pg-secret + namespace: demo + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**Explanation of Key Fields** +- `systemAccounts`: Overrides system accounts defined in the referenced `ComponentDefinition`. + + +:::tip + +In KubeBlocks PostgreSQL Addon, a list of system accounts is defined. And only those accounts can be customized with a new secret. + +::: + +To get the of accounts: +```bash +kubectl get cmpd postgresql-16-1.0.0 -oyaml | yq '.spec.systemAccounts[].name' +``` + +Expected Output: +```bash +postgres +kbadmin +... +``` + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## Connecting to the PostgreSQL Cluster + +KubeBlocks automatically creates a secret containing the PostgreSQL postgres credentials. 
Retrieve the credentials with the following commands: + +```bash +kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 -d +custompassword +``` + +To connect to the cluster's primary node, use the PostgreSQL client with the custom password: +```bash +kubectl exec -it -n demo pg-cluster-postgresql-0 -c postgresql -- env PGUSER=postgres PGPASSWORD=custompassword psql +``` + + +## Cleanup +To remove all created resources, delete the PostgreSQL cluster along with its namespace: + +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete secret custom-pg-secret -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you: +- Created a Kubernetes Secret to securely store a custom PostgreSQL postgres password. +- Deployed a PostgreSQL cluster in KubeBlocks with a custom root password. +- Verified the deployment and connected to the cluster's primary node using the PostgreSQL client. + +Using Kubernetes Secrets ensures secure credential management for your PostgreSQL clusters, while KubeBlocks simplifies the deployment and management process. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/06-custom-secret/02-custom-password-generation-policy.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/06-custom-secret/02-custom-password-generation-policy.mdx new file mode 100644 index 00000000..26dab57b --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/06-custom-secret/02-custom-password-generation-policy.mdx @@ -0,0 +1,116 @@ +--- +title: Deploy a PostgreSQL Cluster with a Custom Password Generation Policy on KubeBlocks +description: Learn how to deploy a PostgreSQL cluster in KubeBlocks with a custom password generation policy for the root user to enhance security. 
+keywords: [PostgreSQL, KubeBlocks, Password Policy, Kubernetes, Security] +sidebar_position: 2 +sidebar_label: Custom Password Policy +--- + +# Create a PostgreSQL Cluster With Custom Password Generation Policy on KubeBlocks +This guide explains how to deploy a PostgreSQL cluster in KubeBlocks with a custom password generation policy for the root user. By defining specific password rules, you can ensure strong, secure credentials for your cluster. + + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploying the PostgreSQL Replication Cluster + +KubeBlocks uses a declarative approach for managing PostgreSQL clusters. Below is an example configuration for deploying a PostgreSQL cluster with 2 nodes (1 primary, 1 replicas) and custom password generation policy. + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + disableExporter: true + replicas: 2 + systemAccounts: + - name: postgres + passwordConfig: + length: 20 # Password length: 20 characters + numDigits: 4 # At least 4 digits + numSymbols: 2 # At least 2 symbols + letterCase: MixedCases # Uppercase and lowercase letters + symbolCharacters: '!' # set the allowed symbols when generating password + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**Explanation of Key Fields** +- `systemAccounts`: Overrides system accounts defined in the referenced `ComponentDefinition`. +- `passwordConfig`: Customizes the password generation policy for the `postgres` user. +- `symbolCharacters`: Sets the allowed symbols when generating password. 
+ +:::tip + +In KubeBlocks PostgreSQL Addon, a list of system accounts is defined. And only those accounts can be customized with a new secret. + +::: + +To get the list of accounts: +```bash +kubectl get cmpd postgresql-16-1.0.0 -oyaml | yq '.spec.systemAccounts[].name' +``` + +Expected Output: +```bash +postgres +kbadmin +... +``` + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## Connecting to the PostgreSQL Cluster + +KubeBlocks automatically creates a secret containing the PostgreSQL postgres credentials. Retrieve the credentials with the following commands: + +```bash +PASSWORD=$(kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 -d) +``` + +To connect to the cluster's primary node, use the PostgreSQL client with the custom password: +```bash +kubectl exec -it -n demo pg-cluster-postgresql-0 -c postgresql -- env PGUSER=postgres PGPASSWORD=$PASSWORD psql +``` + +## Cleanup +To remove all created resources, delete the PostgreSQL cluster along with its namespace: + +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you: +- Deployed a PostgreSQL cluster in KubeBlocks with a custom password generation policy. +- Verified the deployment and connected to the cluster's primary node using the PostgreSQL client. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/06-custom-secret/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-postgresql/06-custom-secret/_category_.yml new file mode 100644 index 00000000..bf29dd85 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/06-custom-secret/_category_.yml @@ -0,0 +1,4 @@ +position: 6 +label: Custom Secret +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/07-tls/01-tls-overview.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/07-tls/01-tls-overview.mdx new file mode 100644 index 00000000..b3f4ee25 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/07-tls/01-tls-overview.mdx @@ -0,0 +1,194 @@ +--- +title: Deploying a PostgreSQL Cluster with TLS on KubeBlocks +description: Learn how to deploy a PostgreSQL cluster with TLS encryption on KubeBlocks for secure communication. This guide covers deployment configuration, secure connections, and resource cleanup. +keywords: [KubeBlocks, PostgreSQL, Kubernetes, TLS, Secure Communication] +sidebar_position: 1 +sidebar_label: PostgreSQL Cluster with TLS +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Deploying a PostgreSQL Cluster with TLS on KubeBlocks + +This guide demonstrates how to deploy a PostgreSQL cluster with TLS encryption using KubeBlocks. Transport Layer Security (TLS) ensures secure communication between PostgreSQL clients and servers by encrypting data in transit, protecting sensitive information from interception. 
You'll learn how to: + +- Deploy a PostgreSQL cluster with TLS enabled +- Establish secure connections using different TLS modes +- Verify the TLS configuration +- Clean up resources after testing + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploying the PostgreSQL Replication Cluster + +KubeBlocks uses a declarative approach for managing PostgreSQL clusters. Below is a configuration example for deploying a PostgreSQL cluster with TLS enabled (1 primary, 1 replica): + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + tls: true # Enable TLS encryption + issuer: + name: KubeBlocks # Use KubeBlocks' built-in certificate authority + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**Key Configuration Fields**: +- `tls: true`: Enables TLS encryption for all connections +- `issuer: KubeBlocks`: Uses KubeBlocks' built-in certificate authority (alternatively: `UserProvided` for custom certificates) + +## Verifying the Deployment + +Monitor the cluster status until it reaches the `Running` state: +```bash +kubectl get cluster pg-cluster -n demo -w +``` + +Expected Output: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +pg-cluster postgresql Delete Creating 50s +pg-cluster postgresql Delete Running 4m2s +``` + +Verify TLS configuration on PostgreSQL instances: +```sql +postgres=# show ssl; + ssl +----- + on +(1 row) + +postgres=# show ssl_ca_file; + ssl_ca_file +--------------------- + /etc/pki/tls/ca.pem +(1 row) + +postgres=# show ssl_cert_file; + ssl_cert_file +---------------------- + 
/etc/pki/tls/cert.pem +(1 row) + +postgres=# show ssl_key_file; + ssl_key_file +--------------------- + /etc/pki/tls/key.pem +(1 row) +``` + +Verify TLS certificates generated by KubeBlocks: +```bash +kubectl get secret -l app.kubernetes.io/instance=pg-cluster -n demo | grep tls +``` + +Expected Output: +```bash +pg-cluster-postgresql-tls-certs Opaque 3 24m +``` + +## Accessing PostgreSQL Cluster Securely + +### Step 1: Retrieve Credentials + +KubeBlocks creates a Secret containing PostgreSQL credentials: +```bash +NAME=$(kubectl get secret -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 --decode) +PASSWD=$(kubectl get secret -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 --decode) +``` + +### Step 2: Connect Using TLS + +Forward PostgreSQL port locally: +```bash +kubectl port-forward svc/pg-cluster-postgresql-postgresql 5432:5432 -n demo +``` + + + + +```bash +psql "host=127.0.0.1 dbname=postgres user=${NAME} password=${PASSWD} sslmode=require" +``` + +Example Output: +```bash +SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, compression: off, ALPN: none) +Type "help" for help. + +postgres=# +``` + + + + + +1. Retrieve and save the root certificate: +```bash +kubectl get -n demo secrets pg-cluster-postgresql-tls-certs -oyaml | yq '.data."ca.pem"' | base64 -d > /tmp/ca.crt +``` + +2. Connect with certificate verification: +```bash +psql "host=127.0.0.1 dbname=postgres user=${NAME} password=${PASSWD} sslmode=verify-full sslrootcert=/tmp/ca.crt" +``` + +Example Output: +```bash +SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, compression: off, ALPN: none) +Type "help" for help. + +postgres=# +``` + + + + +## Cleanup + +Remove all tutorial resources: +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +``` + +## Summary + +In this guide, you learned how to: + +1. Deploy a PostgreSQL cluster with TLS encryption using KubeBlocks +2. 
Verify TLS configuration and certificate generation +3. Establish secure connections using different TLS modes: + - `require`: Basic encryption + - `verify-full`: Full certificate validation diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/07-tls/02-tls-custom-cert.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/07-tls/02-tls-custom-cert.mdx new file mode 100644 index 00000000..864bd82c --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/07-tls/02-tls-custom-cert.mdx @@ -0,0 +1,200 @@ +--- +title: Deploy a PostgreSQL Cluster with Custom TLS Certificates on KubeBlocks +description: Step-by-step guide to deploy a PostgreSQL cluster on KubeBlocks using custom TLS certificates for secure communication. Includes certificate generation, cluster deployment, and connection verification. +keywords: [KubeBlocks, PostgreSQL, Kubernetes, TLS, Security, Custom Certificates] +sidebar_position: 2 +sidebar_label: PostgreSQL Cluster with Custom TLS +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Deploy a PostgreSQL Cluster with Custom TLS Certificates on KubeBlocks + +This guide demonstrates how to deploy a PostgreSQL cluster with **custom TLS certificates** using KubeBlocks. By providing your own certificates, you maintain complete control over the security configuration for encrypted client-server communication. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Generate Certificates + +Generate the required certificates using OpenSSL: + +1. **Root Certificate (CA)** +```bash +# Generate CA private key (password protected) +openssl genrsa -aes256 -out ca-key.pem 4096 + +# Create self-signed root certificate (10-year validity) +openssl req -x509 -new -nodes -key ca-key.pem -sha256 -days 3650 -out ca.pem +# Enter certificate details (e.g., Common Name = "PostgreSQL Root CA") +``` + +2. 
**Server Certificate** +```bash +# Generate server private key +openssl genrsa -out server-key.pem 4096 + +# Create Certificate Signing Request +openssl req -new -key server-key.pem -out server-req.pem +# Enter server details (Common Name must match PostgreSQL server address) + +# Sign server certificate with CA (10-year validity) +openssl x509 -req -in server-req.pem -CA ca.pem -CAkey ca-key.pem \ + -CAcreateserial -out server-cert.pem -days 3650 -sha256 +``` + +:::note + +The Common Name (CN) must match your PostgreSQL server address (e.g., service name `pg-cluster-postgresql-postgresql`). + +::: + +3. **Verify Certificates** +```bash +openssl verify -CAfile ca.pem server-cert.pem +# Example Output: server-cert.pem: OK +``` + +## Create Kubernetes Secret + +Store certificates in a Kubernetes Secret for cluster access: + +```bash +kubectl create secret generic postgresql-tls-secret \ + --namespace=demo \ + --from-file=ca.crt=ca.pem \ + --from-file=tls.crt=server-cert.pem \ + --from-file=tls.key=server-key.pem \ + --type=kubernetes.io/tls +``` + +## Deploy PostgreSQL Cluster + +Deploy a 2-node PostgreSQL cluster (1 primary, 1 replica) with TLS: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + tls: true + issuer: + name: UserProvided + secretRef: + name: postgresql-tls-secret + namespace: demo + ca: ca.crt + cert: tls.crt + key: tls.key + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**Key Configuration**: +- `tls: true`: Enables TLS encryption +- `issuer.name: UserProvided`: Specifies custom certificates +- `issuer.secretRef`: Links to the 
certificate Secret + +## Verify Deployment + +Monitor cluster status until it reaches Running state: + +```bash +kubectl get cluster pg-cluster -n demo -w +``` + +Verify SSL configuration on replicas: + +```sql +postgres=# show ssl; + ssl +----- + on + +postgres=# show ssl_ca_file; + ssl_ca_file +--------------------- + /etc/pki/tls/ca.pem + +postgres=# show ssl_cert_file; + ssl_cert_file +---------------------- + /etc/pki/tls/cert.pem + +postgres=# show ssl_key_file; + ssl_key_file +---------------------- + /etc/pki/tls/key.pem +``` + +## Access PostgreSQL Cluster + +### Retrieve Credentials + +```bash +NAME=$(kubectl get secret -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 --decode) +PASSWD=$(kubectl get secret -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 --decode) +``` + +### Connect Securely + + + + +```bash +kubectl port-forward svc/pg-cluster-postgresql-postgresql 5432:5432 -n demo + +psql "host=127.0.0.1 dbname=postgres user=${NAME} password=${PASSWD} sslmode=require" +# Output shows SSL connection details +``` + + + + + +```bash +kubectl exec -it -n demo pg-cluster-postgresql-0 -c postgresql -- \ + env PGUSER=${NAME} PGPASSWORD=${PASSWD} \ + psql 'host=pg-cluster-postgresql-postgresql sslmode=verify-full sslrootcert=/etc/pki/tls/ca.pem' +# Output shows SSL connection details +``` + + + + +## Summary + +In this guide you: +1. Generated self-signed CA and server certificates +2. Stored certificates in a Kubernetes Secret +3. Deployed a TLS-enabled PostgreSQL cluster +4. Verified secure connections + +Using custom TLS certificates ensures encrypted communication between PostgreSQL clients and servers, protecting sensitive data in transit. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/07-tls/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-postgresql/07-tls/_category_.yml new file mode 100644 index 00000000..fd9b300a --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/07-tls/_category_.yml @@ -0,0 +1,4 @@ +position: 7 +label: TLS +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..ea3cbd8f --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,263 @@ +--- +title: Observability for PostgreSQL Clusters with the Prometheus Operator +description: Learn how to set up observability for PostgreSQL Clusters in KubeBlocks using the Prometheus Operator. Configure monitoring and visualize metrics with Grafana. +keywords: [KubeBlocks, PostgreSQL, Prometheus, Grafana, Observability, Metrics] +sidebar_position: 2 +sidebar_label: Observability for PostgreSQL Clusters +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# PostgreSQL Monitoring with Prometheus Operator + +This guide demonstrates how to configure comprehensive monitoring for PostgreSQL clusters in KubeBlocks using: + +1. Prometheus Operator for metrics collection +2. Built-in PostgreSQL exporter for metrics exposure +3. Grafana for visualization + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Install Monitoring Stack + +### 1. 
Install Prometheus Operator +Deploy the kube-prometheus-stack using Helm: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. Verify Installation +Check all components are running: +```bash +kubectl get pods -n monitoring +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + + +## Deploy a PostgreSQL Cluster + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +**Key Monitoring Configuration** +- `disableExporter: false` enables the built-in metrics exporter +- Exporter runs as sidecar container in each PostgreSQL pod +- Scrapes PostgreSQL metrics on port 9187 + +## Verifying the Deployment +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster pg-cluster -n demo -w +``` + +Example Output: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +pg-cluster postgresql Delete Creating 50s +pg-cluster postgresql Delete Running 4m2s +``` +Once the cluster status becomes Running, your PostgreSQL cluster is ready for use. + +## Configure Metrics Collection + +### 1. 
Verify Exporter Endpoint +Confirm metrics are exposed: + +```bash +kubectl get po pg-cluster-postgresql-0 -n demo -oyaml | \ + yq '.spec.containers[] | select(.name=="exporter") | .ports' +``` + +Example Output: +```yaml +- containerPort: 9187 + name: http-metrics # Used in PodMonitor + protocol: TCP +``` + +Test metrics endpoint: + +```bash +kubectl -n demo exec -it pods/pg-cluster-postgresql-0 -- \ + curl -s http://127.0.0.1:9187/metrics | head -n 50 +``` + +### 2. Create PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: pg-cluster-pod-monitor + namespace: demo + labels: # Must match the setting in 'prometheus.spec.podMonitorSelector' + release: prometheus +spec: + jobLabel: app.kubernetes.io/managed-by + # defines the labels which are transferred from the + # associated Kubernetes 'Pod' object onto the ingested metrics + # set the labels w.r.t. your own needs + podTargetLabels: + - app.kubernetes.io/instance + - app.kubernetes.io/managed-by + - apps.kubeblocks.io/component-name + - apps.kubeblocks.io/pod-name + podMetricsEndpoints: + - path: /metrics + port: http-metrics # Must match exporter port name + scheme: http + namespaceSelector: + matchNames: + - demo # Target namespace + selector: + matchLabels: + app.kubernetes.io/instance: pg-cluster + apps.kubeblocks.io/component-name: postgresql +``` +**PodMonitor Configuration Guide** + +| Parameter | Required | Description | +|-----------|----------|-------------| +| `port` | Yes | Must match exporter port name ('http-metrics') | +| `namespaceSelector` | Yes | Targets namespace where PostgreSQL runs | +| `labels` | Yes | Must match Prometheus's podMonitorSelector | +| `path` | No | Metrics endpoint path (default: /metrics) | +| `interval` | No | Scraping interval (default: 30s) | + +## Verify Monitoring Setup + +### 1. 
Check Prometheus Targets +Forward and access Prometheus UI: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +Open your browser and navigate to: +http://localhost:9090/targets + +Check if there is a scrape job corresponding to the PodMonitor (the job name is 'demo/pg-cluster-pod-monitor'). + +Expected State: +- The State of the target should be UP. +- The target's labels should include the ones defined in podTargetLabels (e.g., 'app_kubernetes_io_instance'). + +### 2. Test Metrics Collection +Verify metrics are being scraped: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=up{app_kubernetes_io_instance="pg-cluster"}' | jq +``` + +Example Output: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "pg-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "postgresql", + "apps_kubeblocks_io_pod_name": "pg-cluster-postgresql-1", + "container": "exporter", + "endpoint": "http-metrics", + "instance": "10.244.0.129:9187", + "job": "demo/pg-cluster-pod-monitor", + "namespace": "demo", + "pod": "pg-cluster-postgresql-1" + }, + "value": [ + 1747377596.792, + "1" + ] + }, + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "pg-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "postgresql", + "apps_kubeblocks_io_pod_name": "pg-cluster-postgresql-0", + "container": "exporter", + "endpoint": "http-metrics", + "instance": "10.244.0.128:9187", + "job": "demo/pg-cluster-pod-monitor", + "namespace": "demo", + "pod": "pg-cluster-postgresql-0" + }, + "value": [ + 1747377596.792, + "1" + ] + } + ] + } +} +``` +## Visualize in Grafana + +### 1. 
Access Grafana +Port-forward and login: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +Open your browser and navigate to http://localhost:3000. Use the default credentials to log in: +- Username: 'admin' +- Password: 'prom-operator' (default) + +### 2. Import Dashboard +Import the KubeBlocks PostgreSQL dashboard: + +1. In Grafana, navigate to "+" → "Import" +2. Choose one of these methods: + - Paste the dashboard URL: + `https://raw.githubusercontent.com/apecloud/kubeblocks-addons/main/addons/postgresql/dashboards/postgresql.json` + - Or upload the JSON file directly + +**Dashboard Includes:** +- Cluster status overview +- Query performance metrics +- Connection statistics +- Replication health + +![postgresql-monitoring-grafana-dashboard.png](/img/docs/en/postgresql-monitoring-grafana-dashboard.png) + + +## Delete +To delete all the created resources, run the following commands: +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +kubectl delete podmonitor pg-cluster-pod-monitor -n demo +``` + +## Summary +In this tutorial, we set up observability for a PostgreSQL cluster in KubeBlocks using the Prometheus Operator. +By configuring a `PodMonitor`, we enabled Prometheus to scrape metrics from the PostgreSQL exporter. +Finally, we visualized these metrics in Grafana. This setup provides valuable insights for monitoring the health and performance of your PostgreSQL databases. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/08-monitoring/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-postgresql/08-monitoring/_category_.yml new file mode 100644 index 00000000..6953a6d9 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +position: 8 +label: Monitoring +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/09-faqs.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/09-faqs.mdx new file mode 100644 index 00000000..6cb759fa --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/09-faqs.mdx @@ -0,0 +1,71 @@ +--- +title: FAQs +description: FAQs of PostgreSQL +keywords: [KubeBlocks, PostgreSQL, Kubernetes Operator] +sidebar_position: 9 +sidebar_label: FAQs +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# PostgreSQL FAQs + +## 1. Use ETCD as Patroni DCS + +KubeBlocks PostgreSQL uses the Kubernetes API itself as DCS (Distributed Config Store) by default. +But when the control plane is under extreme high load, it may lead to unexpected demotion of the primary replica. And it's recommended to use ETCD as DCS in such extreme cases. + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster-etcd + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: "16.4.0" + env: + - name: DCS_ENABLE_KUBERNETES_API # unset this env if you use zookeeper or etcd, default to empty + - name: ETCD3_HOST + value: 'etcd-cluster-etcd-headless.demo.svc.cluster.local:2379' # where is your etcd? + # - name: ZOOKEEPER_HOSTS + # value: 'myzk-zookeeper-0.myzk-zookeeper-headless.demo.svc.cluster.local:2181' # where is your zookeeper? 
+ replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +The key fields are: +- `DCS_ENABLE_KUBERNETES_API`: Unset this env to use ETCD or ZooKeeper as DCS +- `ETCD3_HOST`: The host of ETCD cluster + +You can also use ZooKeeper as DCS by unsetting `DCS_ENABLE_KUBERNETES_API` and setting `ZOOKEEPER_HOSTS` to the host of ZooKeeper cluster. + +KubeBlocks has ETCD and ZooKeeper Addons in the `kubeblocks-addons` repository. You can refer to the following links for more details. +- https://github.com/apecloud/kubeblocks-addons/tree/main/examples/etcd +- https://github.com/apecloud/kubeblocks-addons/tree/main/examples/zookeeper + +You can shell into one of the etcd container to view the etcd data, and view the etcd data with etcdctl. + +```bash +etcdctl get /service --prefix +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-postgresql/_category_.yml new file mode 100644 index 00000000..056f1daf --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/_category_.yml @@ -0,0 +1,4 @@ +position: 12 +label: KubeBlocks for PostgreSQL Community Edition +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_category_.yml new file mode 100644 index 00000000..753acb79 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_category_.yml @@ -0,0 +1,5 @@ +position: 100 +label: tpl +collapsible: true +collapsed: false +hidden: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_create-pg-replication-cluster.mdx 
b/docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_create-pg-replication-cluster.mdx
new file mode 100644
index 00000000..aadbb302
--- /dev/null
+++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_create-pg-replication-cluster.mdx
@@ -0,0 +1,35 @@
+KubeBlocks uses a declarative approach for managing PostgreSQL clusters. Below is an example configuration for deploying a PostgreSQL cluster with 2 replicas (1 primary, 1 replica).
+
+Apply the following YAML configuration to deploy the cluster:
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: pg-cluster
+  namespace: demo
+spec:
+  terminationPolicy: Delete
+  clusterDef: postgresql
+  topology: replication
+  componentSpecs:
+    - name: postgresql
+      serviceVersion: 16.4.0
+      disableExporter: true
+      replicas: 2
+      resources:
+        limits:
+          cpu: "0.5"
+          memory: "0.5Gi"
+        requests:
+          cpu: "0.5"
+          memory: "0.5Gi"
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 20Gi
+```
diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_prerequisites.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_prerequisites.mdx
new file mode 100644
index 00000000..e632dc41
--- /dev/null
+++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_prerequisites.mdx
@@ -0,0 +1,11 @@
+Before proceeding, ensure the following:
+- Environment Setup:
+  - A Kubernetes cluster is up and running.
+  - The kubectl CLI tool is configured to communicate with your cluster.
+  - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the linked installation instructions.
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_verify-pg-replication-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_verify-pg-replication-cluster.mdx new file mode 100644 index 00000000..94aec06f --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-postgresql/_tpl/_verify-pg-replication-cluster.mdx @@ -0,0 +1,18 @@ +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster pg-cluster -n demo -w +``` + +Expected Output: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +pg-cluster postgresql Delete Creating 50s +pg-cluster postgresql Delete Running 4m2s +``` +Once the cluster status becomes Running, your PostgreSQL cluster is ready for use. + +:::tip +If you are creating the cluster for the very first time, it may take some time to pull images before running. + +::: \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/01-overview.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/01-overview.mdx new file mode 100644 index 00000000..c6faa439 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/01-overview.mdx @@ -0,0 +1,57 @@ +--- +title: Overview of KubeBlocks Qdrant Addon +description: Learn about the features and capabilities of the KubeBlocks Qdrant addon, including deployment topologies, lifecycle management, backup and restore, and supported versions. +keywords: [Qdrant, KubeBlocks, operator, database, features, lifecycle management, backup, restore] +sidebar_position: 1 +sidebar_label: Overview +--- + +# Overview of KubeBlocks Qdrant Addon + +Qdrant is an open-source vector search engine and vector database designed for efficient similarity search and storage of high-dimensional vectors. 
It is optimized for AI-driven applications, such as semantic search, recommendation systems, and retrieval-augmented generation (RAG) in large language models (LLMs). + +## Key Features + +### Lifecycle Management + +KubeBlocks simplifies Qdrant operations with comprehensive lifecycle management: + +| Feature | Description | +|------------------------------|-----------------------------------------------------------------------------| +| **Horizontal Scaling** | Scale replicas in/out to adjust capacity | +| **Vertical Scaling** | Adjust CPU/memory resources for Qdrant instances | +| **Volume Expansion** | Dynamically increase storage capacity without downtime | +| **Restart Operations** | Controlled cluster restarts with minimal disruption | +| **Start/Stop** | Temporarily suspend/resume cluster operations | +| **Password Management** | Ability to set and manage custom root password for the Qdrant cluster during creation | +| **Custom Services** | Expose specialized database endpoints | +| **Replica Management** | Safely decommission or rebuild specific replicas | +| **Version Upgrades** | Perform minor version upgrades seamlessly | +| **Advanced Scheduling** | Customize pod placement and resource allocation | +| **Monitoring** | Integrated Prometheus metrics collection | +| **Logging** | Centralized logs via Loki Stack | + + +### Backup and Restore + +KubeBlocks supports multiple backup strategies for Qdrant: + +| Feature | Method | Description | +|-------------|--------|------------| +| Full Backup | datafile | uses HTTP API `snapshot` to create snapshot for all collections. 
|
+### Supported Versions
+
+KubeBlocks Qdrant Addon supports these Qdrant versions:
+
+| Major Version | Supported Minor Versions |
+|---------------|--------------------------------|
+| 1.5 | 1.5.0 |
+| 1.7 | 1.7.3 |
+| 1.8 | 1.8.1,1.8.4 |
+| 1.10 | 1.10.0 |
+
+The list of supported versions can be found with the following command:
+```bash
+kubectl get cmpv qdrant
+```
diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/02-quickstart.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/02-quickstart.mdx
new file mode 100644
index 00000000..1002a962
--- /dev/null
+++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/02-quickstart.mdx
@@ -0,0 +1,446 @@
+---
+title: Qdrant Quickstart
+description: Comprehensive guide to deploying and managing Qdrant ReplicaSet Clusters with KubeBlocks, including installation, configuration, and operational best practices, as an alternative to a dedicated operator.
+keywords: [Kubernetes Operator, Qdrant, KubeBlocks, Helm, Cluster Management, QuickStart]
+sidebar_position: 2
+sidebar_label: Quickstart
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Qdrant Quickstart
+
+This guide provides a comprehensive walkthrough for deploying and managing Qdrant ReplicaSet Clusters using the **KubeBlocks Qdrant Add-on**, covering:
+- System prerequisites and add-on installation
+- Cluster creation and configuration
+- Operational management including start/stop procedures
+- Connection methods and cluster monitoring
+
+## Prerequisites
+
+### System Requirements
+
+Before proceeding, verify your environment meets these requirements:
+
+- A functional Kubernetes cluster (v1.21+ recommended)
+- `kubectl` v1.21+ installed and configured with cluster access
+- Helm installed ([installation guide](https://helm.sh/docs/intro/install/))
+- KubeBlocks installed ([installation guide](../user_docs/overview/install-kubeblocks))
+
+### Verify Qdrant Add-on
+
+The Qdrant Add-on is included with KubeBlocks by default. 
Check its status: + +```bash +helm list -n kb-system | grep qdrant +``` + +
+Example Output: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-qdrant kb-system 1 2025-05-21 deployed qdrant-1.0.0 +``` +
+ +If the add-on isn't enabled, choose an installation method: + + + + + ```bash + # Add Helm repo + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # For users in Mainland China, if GitHub is inaccessible or slow, use this alternative repo: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # Update helm repo + helm repo update + # Search available Add-on versions + helm search repo kubeblocks/qdrant --versions + # Install your desired version (replace with your chosen version) + helm upgrade -i kb-addon-qdrant kubeblocks-addons/qdrant --version -n kb-system + ``` + + + + + ```bash + # Add an index (kubeblocks is added by default) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # Update the index + kbcli addon index update kubeblocks + # Update all indexes + kbcli addon index update --all + ``` + + To search and install an addon: + + ```bash + # Search Add-on + kbcli addon search qdrant + # Install Add-on with your desired version (replace with your chosen version) + kbcli addon install qdrant --version + ``` + **Example Output:** + ```bash + ADDON VERSION INDEX + qdrant 0.9.0 kubeblocks + qdrant 0.9.1 kubeblocks + qdrant 1.0.0 kubeblocks + ``` + To enable or disable an addon: + + ```bash + # Enable Add-on + kbcli addon enable qdrant + # Disable Add-on + kbcli addon disable qdrant + ``` + + + + +:::note +**Version Compatibility** + +Always verify that the Qdrant Add-on version matches your KubeBlocks major version to avoid compatibility issues. + +::: + +### Verify Supported Qdrant Versions + +**List available Qdrant versions:** + +```bash +kubectl get cmpv qdrant +``` +
+Example Output +```text +NAME VERSIONS STATUS AGE +qdrant 1.14.0,1.10.0,1.8.4,1.8.1,1.7.3,1.5.0 Available 26d +``` +
+ +**Check version compatibility for ComponentDefinitions** + +**Step 1.** Get the list of `ComponentDefinition` associated with a given `ComponentVersion` + +```bash +kubectl get cmpv qdrant -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n' +``` + +
+Example Output +```text +qdrant-1.0.0 +``` +
+
+**Step 2.** Get the list of versions compatible with a given `ComponentDefinition`
+
+```bash
+kubectl get cmpv qdrant -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("^qdrant"))) | .releases[]'
+```
+
+This returns versions compatible with `ComponentDefinition` named `qdrant`:
+
+Example Output +```text +1.5.0 +1.7.3 +1.8.1 +1.8.4 +1.10.0 +1.14.0 +``` +
+ +### Storage Configuration + +Qdrant requires persistent storage. Verify available options: + +```bash +kubectl get storageclass +``` + +Recommended storage characteristics: +- Minimum 20Gi capacity +- ReadWriteOnce access mode +- Supports volume expansion +- Appropriate performance for workload + +## Deploy a Qdrant Cluster + +Deploy a basic Qdrant Cluster with default settings: + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/qdrant/cluster.yaml +``` + +This creates: +- A Qdrant Cluster with 3 replicas. +- Default resource allocations (0.5 CPU, 0.5Gi memory) +- 20Gi persistent storage + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: qdrant-cluster + namespace: demo +spec: + # Specifies the behavior when a Cluster is deleted. + # Valid options are: [DoNotTerminate, Delete, WipeOut] (`Halt` is deprecated since KB 0.9) + # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. + # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. + # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. + terminationPolicy: Delete + # Specifies the name of the ClusterDefinition to use when creating a Cluster. + # Note: DO NOT UPDATE THIS FIELD + # The value must be `qdrant` to create a Qdrant Cluster + clusterDef: qdrant + # Specifies the name of the ClusterTopology to be used when creating the + # Cluster. + # Valid options are: [cluster] + topology: cluster + componentSpecs: + - name: qdrant + # ServiceVersion specifies the version of the Service expected to be + # provisioned by this Component. 
+ # Valid options are: [1.10.0,1.5.0,1.7.3,1.8.1,1.8.4] + serviceVersion: 1.10.0 + # Update `replicas` to your need. + # Recommended values are: [3,5,7] + replicas: 3 + # Specifies the resources required by the Component. + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + # Specifies a list of PersistentVolumeClaim templates that define the storage + # requirements for the Component. + volumeClaimTemplates: + # Refers to the name of a volumeMount defined in + # `componentDefinition.spec.runtime.containers[*].volumeMounts + - name: data + spec: + # The name of the StorageClass required by the claim. + # If not specified, the StorageClass annotated with + # `storageclass.kubernetes.io/is-default-class=true` will be used by default + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # Set the storage size as needed + storage: 20Gi +``` + +For more API fields and descriptions, refer to the [API Reference](../user_docs/references/api-reference/cluster). + +### Create a Version-Specific Qdrant Cluster + +To create a cluster with a specific version, configure `spec.componentSpecs.serviceVersion` (major.minor version) fields before applying it: + + + + ```yaml + componentSpecs: + - name: qdrant + serviceVersion: 1.10.0 # Valid options: [1.10.0,1.5.0,1.7.3,1.8.1,1.8.4] + ``` + + + +## Verify Cluster Status + +When deploying a Qdrant Cluster with 3 replicas: + +Confirm successful deployment by checking: + +1. Cluster phase is `Running` +2. 
All pods are operational + +Check status using either method: + + + +```bash +kubectl get cluster qdrant-cluster -n demo -w +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +qdrant-cluster qdrant Delete Creating 27s +qdrant-cluster qdrant Delete Running 64s + +kubectl get pods -l app.kubernetes.io/instance=qdrant-cluster -n demo +qdrant-cluster-qdrant-0 2/2 Running 0 92s +qdrant-cluster-qdrant-1 2/2 Running 0 77s +qdrant-cluster-qdrant-2 2/2 Running 0 63s +``` + + + + + With `kbcli` installed, you can view comprehensive cluster information: + +```bash +kbcli cluster describe qdrant-cluster -n demo + +Name: qdrant-cluster Created Time: May 18,2025 23:05 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo qdrant cluster Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL +qdrant qdrant-cluster-qdrant-qdrant.demo.svc.cluster.local:6333 + qdrant-cluster-qdrant-qdrant.demo.svc.cluster.local:6334 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +qdrant 1.10.0 qdrant-cluster-qdrant-0 Running zone-x x.y.z May 18,2025 23:05 UTC+0800 +qdrant 1.10.0 qdrant-cluster-qdrant-1 Running zone-x x.y.z May 18,2025 23:06 UTC+0800 +qdrant 1.10.0 qdrant-cluster-qdrant-2 Running zone-x x.y.z May 18,2025 23:06 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +qdrant 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +qdrant qdrant-1.0.0 docker.io/qdrant/qdrant:v1.10.0 + docker.io/apecloud/curl-jq:0.1.0 + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +Show cluster events: kbcli cluster list-events -n demo qdrant-cluster +``` + + + + +## Stop the Qdrant Cluster + +Stopping a cluster temporarily suspends operations while preserving all data and configuration: + +**Key Effects:** +- Compute resources (Pods) are released +- Persistent storage 
(PVCs) remains intact +- Service definitions are maintained +- Cluster configuration is preserved +- Operational costs are reduced + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/qdrant/stop.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-stop + namespace: demo + spec: + clusterName: qdrant-cluster + type: Stop + ``` + + + + Alternatively, stop by setting `spec.componentSpecs.stop` to true: + + ```bash + kubectl patch cluster qdrant-cluster -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + } + ]' + ``` + + ```yaml + spec: + componentSpecs: + - name: qdrant + stop: true # Set to stop component + replicas: 3 + ``` + + + +## Start the Qdrant Cluster + +Restarting a stopped cluster resumes operations with all data and configuration intact. + +**Key Effects:** +- Compute resources (Pods) are recreated +- Services become available again +- Cluster returns to previous state + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/qdrant/start.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-start + namespace: demo + spec: + clusterName: qdrant-cluster + type: Start + ``` + + + + Restart by setting `spec.componentSpecs.stop` to false: + + ```bash + kubectl patch cluster qdrant-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } + ]' + ``` + + + + +## Delete Qdrant Cluster + +Choose carefully based on your data retention needs: + +| Policy | Resources Removed | Data Removed | Recommended For | +|-----------------|-------------------|--------------|-----------------| +| DoNotTerminate | None | None | Critical production clusters | +| Delete | All resources | PVCs deleted | Non-critical environments | 
+| WipeOut | All resources | Everything* | Test environments only | + +*Includes snapshots and backups in external storage + +**Pre-Deletion Checklist:** +1. Verify no applications are using the cluster +2. Ensure required backups exist +3. Confirm proper terminationPolicy is set +4. Check for dependent resources + +For test environments, use this complete cleanup: + +```bash +kubectl patch cluster qdrant-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster qdrant-cluster -n demo +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/01-stop-start-restart.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..a48ed9d9 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,281 @@ +--- +title: Qdrant Cluster Lifecycle Management (Stop, Start, Restart) +description: Learn how to manage Qdrant Cluster states in KubeBlocks including stopping, starting, and restarting operations to optimize resources. +keywords: [KubeBlocks, Qdrant, Cluster Management, Stop, Start, Restart] +sidebar_position: 1 +sidebar_label: Lifecycle Management +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Qdrant Cluster Lifecycle Management + +This guide demonstrates how to manage a Qdrant Cluster's operational state in **KubeBlocks**, including: + +- Stopping the cluster to conserve resources +- Starting a stopped cluster +- Restarting cluster components + +These operations help optimize resource usage and reduce operational costs in Kubernetes environments. 
+ +Lifecycle management operations in KubeBlocks: + +| Operation | Effect | Use Case | +|-----------|--------|----------| +| Stop | Suspends cluster, retains storage | Cost savings, maintenance | +| Start | Resumes cluster operation | Restore service after pause | +| Restart | Recreates pods for component | Configuration changes, troubleshooting | + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Qdrant Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Cluster Lifecycle Operations + +### Stopping the Cluster + +Stopping a Qdrant Cluster in KubeBlocks will: + +1. Terminates all running pods +2. Retains persistent storage (PVCs) +3. Maintains cluster configuration + +This operation is ideal for: +- Temporary cost savings +- Maintenance windows +- Development environment pauses + + + + + +Option 1: OpsRequest API + +Create a Stop operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: qdrant-cluster-stop-ops + namespace: demo +spec: + clusterName: qdrant-cluster + type: Stop +``` + + + + +Option 2: Cluster API Patch + +Modify the cluster spec directly by patching the stop field: + + +```bash +kubectl patch cluster qdrant-cluster -n demo --type='json' -p='[ +{ + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true +} +]' +``` + + + + +### Verifying Cluster Stop + +To confirm a successful stop operation: + +1. Check cluster status transition: + ```bash + kubectl get cluster qdrant-cluster -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + qdrant-cluster qdrant Delete Stopping 6m3s + qdrant-cluster qdrant Delete Stopped 6m55s + ``` + +2. 
Verify no running pods: + ```bash + kubectl get pods -l app.kubernetes.io/instance=qdrant-cluster -n demo + ``` + Example Output: + ```bash + No resources found in demo namespace. + ``` + +3. Confirm persistent volumes remain: + ```bash + kubectl get pvc -l app.kubernetes.io/instance=qdrant-cluster -n demo + ``` + Example Output: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE + data-qdrant-cluster-qdrant-0 Bound pvc-uuid 20Gi RWO 22m + data-qdrant-cluster-qdrant-1 Bound pvc-uuid 20Gi RWO 21m + data-qdrant-cluster-qdrant-2 Bound pvc-uuid 20Gi RWO 21m + ``` + +### Starting the Cluster + +Starting a stopped Qdrant Cluster: +1. Recreates all pods +2. Reattaches persistent storage +3. Restores service endpoints + +Expected behavior: +- Cluster returns to previous state +- No data loss occurs +- Services resume automatically + + + + +Initiate a Start operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: qdrant-cluster-start-ops + namespace: demo +spec: + # Specifies the name of the Cluster resource that this operation is targeting. + clusterName: qdrant-cluster + type: Start +``` + + + + + +Modify the cluster spec to resume operation: +1. Set stop: false, or +2. Remove the stop field entirely + ```bash + kubectl patch cluster qdrant-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } + ]' + ``` + + + + + +### Verifying Cluster Start + +To confirm a successful start operation: + +1. Check cluster status transition: + ```bash + kubectl get cluster qdrant-cluster -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + qdrant-cluster qdrant Delete Updating 24m + qdrant-cluster qdrant Delete Running 24m + qdrant-cluster qdrant Delete Running 24m + ``` + +2. 
Verify pod recreation: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=qdrant-cluster + ``` + Example Output: + ```bash + NAME READY STATUS RESTARTS AGE + qdrant-cluster-qdrant-0 2/2 Running 0 55s + qdrant-cluster-qdrant-1 2/2 Running 0 44s + qdrant-cluster-qdrant-2 2/2 Running 0 33s + ``` + +### Restarting Cluster + +Restart operations provide: +- Pod recreation without full cluster stop +- Component-level granularity +- Minimal service disruption + +Use cases: +- Configuration changes requiring restart +- Resource refresh +- Troubleshooting + +**Using OpsRequest API** + +Target specific components `qdrant` for restart: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: qdrant-cluster-restart-ops + namespace: demo +spec: + clusterName: qdrant-cluster + type: Restart + restart: + - componentName: qdrant +``` + +**Verifying Restart Completion** + +To verify a successful component restart: + +1. Track OpsRequest progress: + ```bash + kubectl get opsrequest qdrant-cluster-restart-ops -n demo -w + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + qdrant-cluster-restart-ops Restart qdrant-cluster Running 0/3 4s + qdrant-cluster-restart-ops Restart qdrant-cluster Running 1/3 28s + qdrant-cluster-restart-ops Restart qdrant-cluster Running 2/3 56s + qdrant-cluster-restart-ops Restart qdrant-cluster Running 2/3 109s + ``` + +2. Check pod status: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=qdrant-cluster + ``` + Note: Pods will show new creation timestamps after restart + +3. Verify component health: + ```bash + kbcli cluster describe qdrant-cluster -n demo + ``` + +Once the operation is complete, the cluster will return to the Running state. + +## Summary +In this guide, you learned how to: +1. Stop a Qdrant Cluster to suspend operations while retaining persistent storage. +2. Start a stopped cluster to bring it back online. +3. 
Restart specific cluster components to recreate their Pods without stopping the entire cluster. + +By managing the lifecycle of your Qdrant Cluster, you can optimize resource utilization, reduce costs, and maintain flexibility in your Kubernetes environment. KubeBlocks provides a seamless way to perform these operations, ensuring high availability and minimal disruption. diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/02-vertical-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..0eda03ea --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,174 @@ +--- +title: Vertical Scaling in a Qdrant Cluster +description: Learn how to perform vertical scaling in a Qdrant Cluster managed by KubeBlocks to optimize resource utilization and improve performance. +keywords: [KubeBlocks, Qdrant, Vertical Scaling, Kubernetes, Resources] +sidebar_position: 2 +sidebar_label: Vertical Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Vertical Scaling for Qdrant Clusters with KubeBlocks + +This guide demonstrates how to vertically scale a Qdrant Cluster managed by KubeBlocks by adjusting compute resources (CPU and memory) while maintaining the same number of replicas. + +Vertical scaling modifies compute resources (CPU and memory) for Qdrant instances while maintaining replica count. Key characteristics: + +- **Non-disruptive**: When properly configured, maintains availability during scaling +- **Granular**: Adjust CPU, memory, or both independently +- **Reversible**: Scale up or down as needed + +KubeBlocks ensures minimal impact during scaling operations by following a controlled, role-aware update strategy: +**Role-Aware Replicas (Primary/Secondary Replicas)** +- Secondary replicas update first – Non-leader pods are upgraded to minimize disruption. 
+- Primary updates last – Only after all secondaries are healthy does the primary pod restart. +- Cluster state progresses from Updating → Running once all replicas are stable. + +**Role-Unaware Replicas (Ordinal-Based Scaling)** +If replicas have no defined roles, updates follow Kubernetes pod ordinal order: +- Highest ordinal first (e.g., pod-2 → pod-1 → pod-0) to ensure deterministic rollouts. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Qdrant Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Vertical Scale + +**Expected Workflow**: + +1. Pods are updated in pod ordinal order, from highest to lowest, (e.g., pod-2 → pod-1 → pod-0) +1. Cluster status transitions from `Updating` to `Running` + + + + Option 1: Using VerticalScaling OpsRequest + + Apply the following YAML to scale up the resources for the qdrant component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-cluster-vscale-ops + namespace: demo + spec: + clusterName: qdrant-cluster + type: VerticalScaling + verticalScaling: + - componentName: qdrant + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + + You can check the progress of the scaling operation with the following command: + + ```bash + kubectl -n demo get ops qdrant-cluster-vscale-ops -w + ``` + + Expected Result: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + qdrant-cluster-vscale-ops VerticalScaling qdrant-cluster Running 0/3 32s + qdrant-cluster-vscale-ops VerticalScaling qdrant-cluster Running 1/3 55s + qdrant-cluster-vscale-ops VerticalScaling qdrant-cluster Running 2/3 82s + qdrant-cluster-vscale-ops VerticalScaling qdrant-cluster Running 3/3 2m13s + ``` + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update `spec.componentSpecs.resources` field to the 
desired resources for vertical scale. + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: qdrant + replicas: 3 + resources: + requests: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + limits: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + ... + ``` + + + +## Best Practices & Considerations + +**Planning:** +- Scale during maintenance windows or low-traffic periods +- Verify Kubernetes cluster has sufficient resources +- Check for any ongoing operations before starting + +**Execution:** +- Maintain balanced CPU-to-Memory ratios +- Set identical requests/limits for guaranteed QoS + +**Post-Scaling:** +- Monitor resource utilization and application performance +- Consider adjusting Qdrant parameters if needed + +## Verification +Verify the updated resources by inspecting the cluster configuration or Pod details: +```bash +kbcli cluster describe qdrant-cluster -n demo +``` + +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +qdrant 1 / 1 1Gi / 1Gi data:20Gi +``` + +## Key Benefits of Vertical Scaling with KubeBlocks +- Seamless Scaling: Pods are recreated in a specific order to ensure minimal disruption. +- Dynamic Resource Adjustments: Easily scale CPU and memory based on workload requirements. +- Flexibility: Choose between OpsRequest for dynamic scaling or direct API updates for precise control. +- Improved Availability: The cluster remains operational during the scaling process, maintaining high availability. + +## Cleanup +To remove all created resources, delete the Qdrant Cluster along with its namespace: +```bash +kubectl delete cluster qdrant-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +1. Deploy a Qdrant Cluster managed by KubeBlocks. +2. 
Perform vertical scaling by increasing or decreasing resources for the qdrant component. +3. Use both OpsRequest and direct Cluster API updates to adjust resource allocations. + +Vertical scaling is a powerful tool for optimizing resource utilization and adapting to changing workload demands, ensuring your Qdrant Cluster remains performant and resilient. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/03-horizontal-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..48f7f1ea --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,279 @@ +--- +title: Horizontal Scaling of Qdrant Clusters with KubeBlocks +description: Learn how to perform horizontal scaling (scale-out and scale-in) on a Qdrant cluster managed by KubeBlocks using OpsRequest and direct Cluster API updates. +keywords: [KubeBlocks, Qdrant, Horizontal Scaling, Scale-Out, Scale-In, Kubernetes] +sidebar_position: 3 +sidebar_label: Horizontal Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Horizontal Scaling for Qdrant Clusters with KubeBlocks + +This guide explains how to perform horizontal scaling (scale-out and scale-in) on a Qdrant cluster managed by KubeBlocks. You'll learn how to use both **OpsRequest** and direct **Cluster API** updates to achieve this. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Qdrant Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + +## Scale-out (Add Replicas) + +**Expected Workflow**: + +1. New pod is provisioned, and transitions from `Pending` to `Running`. +2. 
Cluster status changes from `Updating` to `Running` + +:::note + +Qdrant uses the **Raft consensus protocol** to maintain consistency regarding the cluster topology and the collections structure. +Better to have an odd number of replicas, such as 3, 5, 7, to avoid split-brain scenarios, after scaling out/in the cluster. + +::: + + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale out the Qdrant cluster by adding 1 replica to qdrant component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-cluster-scale-out-ops + namespace: demo + spec: + clusterName: qdrant-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: qdrant + # Specifies the replica changes for scaling in components + scaleOut: + # Specifies the replica changes for the component. + # add one more replica to current component + replicaChanges: 1 + ``` + + Monitor the progress of the scaling operation: + + ```bash + kubectl get ops qdrant-cluster-scale-out-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + qdrant-cluster-scale-out-ops HorizontalScaling qdrant-cluster Running 0/1 9s + qdrant-cluster-scale-out-ops HorizontalScaling qdrant-cluster Running 1/1 16s + qdrant-cluster-scale-out-ops HorizontalScaling qdrant-cluster Succeed 1/1 16s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: qdrant + replicas: 4 # increase replicas to scale-out + ... 
+ ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster qdrant-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 4}]' + ``` + + + +### Verify Scale-Out + +After applying the operation, you will see a new pod created and the Qdrant cluster status goes from `Updating` to `Running`, and the newly created pod has a new role `secondary`. + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=qdrant-cluster +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +qdrant-cluster-qdrant-0 2/2 Running 0 6m24s +qdrant-cluster-qdrant-1 2/2 Running 0 7m19s +qdrant-cluster-qdrant-2 2/2 Running 0 5m57s +qdrant-cluster-qdrant-3 2/2 Running 0 3m54s +``` + +## Scale-in (Remove Replicas) + +**Expected Workflow**: + +1. Selected replica (the one with the largest ordinal) is removed +3. Pod is terminated gracefully +4. Cluster status changes from `Updating` to `Running` + +:::note + +On Qdrant scale-in, data will be redistributed among the remaining replicas. Make sure the cluster have enough capacity to accommodate the data. +The data redistribution process may take some time depending on the amount of data. +It is handled by Qdrant `MemberLeave` operation, and Pods won't be deleted until the data redistribution, i.e. the `MemberLeave` actions completed successfully. + +::: + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale in the Qdrant cluster by removing ONE replica: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-cluster-scale-in-ops + namespace: demo + spec: + clusterName: qdrant-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: qdrant + # Specifies the replica changes for scaling in components + scaleIn: + # Specifies the replica changes for the component. 
+ # remove one replica from current component + replicaChanges: 1 + ``` + + Monitor progress: + ```bash + kubectl get ops qdrant-cluster-scale-in-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + qdrant-cluster-scale-in-ops HorizontalScaling qdrant-cluster Running 0/1 8s + qdrant-cluster-scale-in-ops HorizontalScaling qdrant-cluster Running 1/1 24s + qdrant-cluster-scale-in-ops HorizontalScaling qdrant-cluster Succeed 1/1 24s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: qdrant + replicas: 1 # decrease replicas to scale-out + ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster qdrant-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 1}]' + ``` + + + + +### Verify Scale-In + +Example Output (ONE Pod): +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=qdrant-cluster +NAME READY STATUS RESTARTS AGE +qdrant-cluster-qdrant-0 2/2 Running 0 18m +``` + +## Troubleshooting + +On scale-in, KubeBlocks Qdrant will redistribute data in following steps: + +1. Cluster Information Gathering: + +- Identifies the leaving member +- Retrieves cluster state including peer IDs and leader information + +2. Data Migration: + +- Discovers all collections on the leaving member +- For each collection, finds all local shards +- Moves each shard to the cluster leader +- Verifies successful shard transfer before proceeding + +3. 
Cluster Membership Update:
+
+- Removes the leaving peer from the cluster membership
+- Uses file locking to prevent concurrent removal operations
+
+
+If the scale-in operation gets stuck for a long time, please check these resources:
+
+```bash
+# Check agent logs on the leaving Pod
+kubectl logs -n demo <pod-name> -c kbagent
+
+# Check cluster events for errors
+kubectl get events -n demo --field-selector involvedObject.name=qdrant-cluster
+
+# Check kubeblocks logs
+kubectl -n kb-system logs deploy/kubeblocks
+```
+
+## Best Practices
+
+When performing horizontal scaling:
+- Scale during low-traffic periods when possible
+- Monitor cluster health during scaling operations
+- Verify sufficient resources exist before scaling out
+- Consider storage requirements for new replicas
+
+## Cleanup
+To remove all created resources, delete the Qdrant cluster along with its namespace:
+```bash
+kubectl delete cluster qdrant-cluster -n demo
+kubectl delete ns demo
+```
+
+## Summary
+In this guide you learned how to:
+- Perform scale-out operations to add replicas to a Qdrant cluster.
+- Perform scale-in operations to remove replicas from a Qdrant cluster.
+- Use both OpsRequest and direct Cluster API updates for horizontal scaling.
+
+KubeBlocks ensures seamless scaling with minimal disruption to your database operations.
diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/04-volume-expansion.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/04-volume-expansion.mdx
new file mode 100644
index 00000000..33591b9c
--- /dev/null
+++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/04-volume-expansion.mdx
@@ -0,0 +1,218 @@
+---
+title: Expanding Volume in a Qdrant Cluster
+description: Learn how to expand Persistent Volume Claims (PVCs) in a Qdrant cluster managed by KubeBlocks without downtime.
+keywords: [KubeBlocks, Qdrant, Volume Expansion, Kubernetes, PVC] +sidebar_position: 4 +sidebar_label: Volume Expansion +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Expanding Volume in a Qdrant Cluster + +This guide explains how to expand Persistent Volume Claims (PVCs) in a Qdrant cluster managed by **KubeBlocks**. Volume expansion enables dynamic storage capacity increases, allowing your database to scale seamlessly as data grows. When supported by the underlying storage class, this operation can be performed without downtime. + +Volume expansion allows you to increase the size of a Persistent Volume Claim (PVC) after it has been created. This feature was introduced in Kubernetes v1.11 and became generally available (GA) in Kubernetes v1.24. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +### Check the Storage Class for Volume Expansion Support + +List all available storage classes and verify if volume expansion is supported by checking the `ALLOWVOLUMEEXPANSION` field: +```bash +kubectl get storageclass +``` + +Example Output: +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +Ensure the storage class you are using has `ALLOWVOLUMEEXPANSION` set to true. If it is false, the storage class does not support volume expansion. + +## Deploy a Qdrant Cluster with StorageClass + +KubeBlocks uses a declarative approach to manage Qdrant clusters. Below is an example configuration for deploying a Qdrant cluster with 3 replicas. 
+ +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: qdrant-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: qdrant + topology: cluster + componentSpecs: + - name: qdrant + serviceVersion: 1.10.0 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + # specify storage class name supports Volume Expansion + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**Explanation of Key Fields** +- `storageClassName`: Specifies `StorageClass` name that supports volume expansion. If not set, the StorageClass annotated `default` will be used. + +:::note +**ALLOWVOLUMEEXPANSION** + +Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`) when creating cluster. + +::: + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Expand volume + +:::note +1. Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`). +2. The new size must be larger than the current size. +3. Volume expansion may require additional configurations depending on the storage provider. 
+::: + +You can expand the volume in one of two ways: + + + + + Option 1: Using VolumeExpansion OpsRequest + + Apply the following YAML to increase the volume size for the qdrant component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-cluster-expand-volume-ops + namespace: demo + spec: + clusterName: qdrant-cluster + type: VolumeExpansion + volumeExpansion: + - componentName: qdrant + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + Monitor the expansion progress with: + + ```bash + kubectl describe ops qdrant-cluster-expand-volume-ops -n demo + ``` + + Expected Result: + ```bash + Status: + Phase: Succeed + ``` + Once completed, the PVC size will be updated. + + :::note + If the storage class you use does not support volume expansion, this OpsRequest fails fast with information like: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update the `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` field to the desired size. + + ```yaml + componentSpecs: + - name: qdrant + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # specify new size, and make sure it is larger than current size + storage: 30Gi + ``` + KubeBlocks will automatically update the PVC size based on the new specifications. + + + +## Verification + +Verify the updated cluster configuration: +```bash +kbcli cluster describe qdrant-cluster -n demo +``` +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +qdrant 500m / 500m 512Mi / 512Mi data:30Gi +``` +The volume size for the data PVC has been updated to the specified value (e.g., 30Gi in this case). 
+ +Confirm PVC resizing completion: +```bash +kubectl get pvc -l app.kubernetes.io/instance=qdrant-cluster -n demo +``` +Expected Output: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +qdrant-cluster-qdrant-data-0 Bound pvc-uuid 30Gi RWO 33m +qdrant-cluster-qdrant-data-1 Bound pvc-uuid 30Gi RWO 33m +qdrant-cluster-qdrant-data-2 Bound pvc-uuid 30Gi RWO 33m +``` + +## Cleanup +To remove all created resources, delete the Qdrant cluster along with its namespace: +```bash +kubectl delete cluster qdrant-cluster -n demo +kubectl delete ns demo +``` + +## Summary + +In this guide you learned how to: +1. Verify storage class compatibility for volume expansion. +2. Perform volume expansion using either: + - OpsRequest for dynamic updates. + - Cluster API for manual updates. +3. Verify the updated PVC size and ensure the resize operation is complete. + +With volume expansion, you can efficiently scale your Qdrant cluster's storage capacity without service interruptions, ensuring your database can grow alongside your application needs. + + diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/05-manage-loadbalancer.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..b5c2f3f6 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,297 @@ +--- +title: Create and Destroy Qdrant Service Using the Declarative Cluster API in KubeBlocks +description: Learn how to configure and manage Qdrant services in KubeBlocks for external and internal access using LoadBalancer and other service types. 
+keywords: [KubeBlocks, Qdrant, LoadBalancer, External Service, Expose, Kubernetes] +sidebar_position: 5 +sidebar_label: Manage Qdrant Services +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Manage Qdrant Services Using the Declarative Cluster API in KubeBlocks + +This guide provides step-by-step instructions for exposing Qdrant services managed by KubeBlocks, both externally and internally. You'll learn to configure external access using cloud provider LoadBalancer services, manage internal services, and properly disable external exposure when no longer needed. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Qdrant Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## View Network Services +List the Services created for the Qdrant cluster: +```bash +kubectl get service -l app.kubernetes.io/instance=qdrant-cluster -n demo +``` + +Example Services: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +qdrant-cluster-qdrant-qdrant ClusterIP 10.96.111.81 6333/TCP,6334/TCP 28m +``` + +## Expose Qdrant Service + +External service addresses enable public internet access to Qdrant, while internal service addresses restrict access to the user's VPC. 
+ +### Service Types Comparison + +| Type | Use Case | Cloud Cost | Security | +|------|----------|------------|----------| +| ClusterIP | Internal service communication | Free | Highest | +| NodePort | Development/testing | Low | Moderate | +| LoadBalancer | Production external access | High | Managed via security groups | + + + + + + + Option 1: Using OpsRequest + + To expose the Qdrant service externally using a LoadBalancer, create an OpsRequest resource: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-cluster-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: qdrant-cluster + expose: + - componentName: qdrant + services: + - name: internet + # Determines how the Service is exposed. Defaults to 'ClusterIP'. + # Valid options are 'ClusterIP', 'NodePort', and 'LoadBalancer'. + serviceType: LoadBalancer + # Contains cloud provider related parameters if ServiceType is LoadBalancer. + # Following is an example for AWS EKS + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + switch: Enable + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops qdrant-cluster-expose-enable-ops -n demo + ``` + + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + qdrant-cluster-expose-enable-ops Expose qdrant-cluster Succeed 1/1 31s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the `spec.services` section in the Cluster resource to include a LoadBalancer service: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: qdrant-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: qdrant + # expose a external service + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + 
service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + componentSelector: qdrant + name: qdrant-internet + serviceName: qdrant-internet + spec: + ipFamilyPolicy: PreferDualStack + ports: + - name: tpc-qdrant + port: 6333 + protocol: TCP + targetPort: tcp-qdrant + type: LoadBalancer + componentSpecs: + ... + ``` + The YAML configuration above adds a new external service under the services section. This LoadBalancer service includes annotations for AWS Network Load Balancer (NLB). + + :::note + Cloud Provider Annotations + + When using a LoadBalancer service, you must include the appropriate annotations specific to your cloud provider. Below is a list of commonly used annotations for different cloud providers: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # Restricts the LoadBalancer to internal VPC access only. Defaults to internet-facing if not specified. + cloud.google.com/l4-rbs: "enabled" # Optimization for internet-facing LoadBalancer + ``` + + - Alibaba Cloud + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # Use "intranet" for internal-facing LoadBalancer + ``` + ::: + + + :::note + The `service.beta.kubernetes.io/aws-load-balancer-internal` annotation controls whether the LoadBalancer is internal or internet-facing. Note that this annotation cannot be modified dynamically after service creation. 
+ ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # Use "true" for internal VPC IPs + ``` + If you change this annotation from "false" to "true" after the Service is created, the annotation may update in the Service object, but the LoadBalancer will still retain its public IP. + + To properly modify this behavior: + - First, delete the existing LoadBalancer service. + - Recreate the service with the updated annotation (`service.beta.kubernetes.io/aws-load-balancer-internal`: "true"). + - Wait for the new LoadBalancer to be provisioned with the correct internal or external IP. + ::: + + + Wait for the Cluster status to transition to Running using the following command: + ```bash + kubectl get cluster qdrant-cluster -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + qdrant-cluster qdrant Delete Running 18m + ``` + + + + +### Verify the Exposed Service +Check the service details to confirm the LoadBalancer service is created: + +```bash +kubectl get service -l app.kubernetes.io/instance=qdrant-cluster -n demo +``` + +Example Output: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +qdrant-cluster-qdrant-internet LoadBalancer 172.20.60.24 6333:31243/TCP 1m +``` + +## Disable External Exposure + + + + + + Option 1: Using OpsRequest + + To disable external access, create an OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-cluster-expose-disable-ops + namespace: demo + spec: + clusterName: qdrant-cluster + expose: + - componentName: qdrant + services: + - name: internet + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops qdrant-cluster-expose-disable-ops -n demo + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + qdrant-cluster-expose-disable-ops Expose qdrant-cluster Succeed 1/1 24s + ``` + + + + + + Option 
2: Using Cluster API + + Alternatively, remove the `spec.services` field from the Cluster resource: + ```bash + kubectl patch cluster qdrant-cluster -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + Monitor the cluster status until it is Running: + ```bash + kubectl get cluster qdrant-cluster -n demo -w + ``` + + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + qdrant-cluster qdrant Delete Running 24m + ``` + + + +### Verify Service Removal + +Ensure that the 'qdrant-cluster-qdrant-internet' Service is removed: + +```bash +kubectl get service -l app.kubernetes.io/instance=qdrant-cluster -n demo +``` + +Expected Result: The 'qdrant-cluster-qdrant-internet' Service should be removed. + +## Cleanup +To remove all created resources, delete the Qdrant cluster along with its namespace: +```bash +kubectl delete cluster qdrant-cluster -n demo +kubectl delete ns demo +``` + +## Summary +This guide demonstrated how to: +- Expose a Qdrant service externally or internally using KubeBlocks. +- Configure LoadBalancer services with cloud provider-specific annotations. +- Manage external access by enabling or disabling services via OpsRequest or direct updates to the Cluster API. + +KubeBlocks provides flexibility and simplicity for managing MySQL services in Kubernetes environments. simplicity for managing Qdrant services in Kubernetes environments. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/06-minior-version-upgrade.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/06-minior-version-upgrade.mdx new file mode 100644 index 00000000..8adee887 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/06-minior-version-upgrade.mdx @@ -0,0 +1,271 @@ +--- +title: Upgrading the Minor Version of a Qdrant Cluster in KubeBlocks +description: Learn how to deploy and upgrade a Qdrant Cluster managed by KubeBlocks with minimal downtime. 
+keywords: [KubeBlocks, Qdrant, Upgrade, Rolling Upgrade, Kubernetes] +sidebar_position: 6 +sidebar_label: Minor Version Upgrade +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Upgrading the Minor Version of a Qdrant Cluster in KubeBlocks + +This guide walks you through the deployment and minor version upgrade of a Qdrant Cluster managed by KubeBlocks, ensuring minimal downtime during the process. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Deploy a Qdrant Cluster + +KubeBlocks uses a declarative approach for managing Qdrant Clusters. Below is an example configuration for deploying a Qdrant Cluster with 3 replicas. 
+ +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: qdrant-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: qdrant + topology: cluster + componentSpecs: + - name: qdrant + serviceVersion: 1.10.0 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +## Verifying the Deployment +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster qdrant-cluster -n demo -w +``` + +Example Output: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +qdrant-cluster qdrant Delete Creating 49s +qdrant-cluster qdrant Delete Running 62s +``` +Once the cluster status becomes Running, your Qdrant Cluster is ready for use. + +:::tip +If you are creating the cluster for the very first time, it may take some time to pull images before running. + +::: + +## List All Available Qdrant Versions + +Use the following command to display the Qdrant versions supported by your KubeBlocks installation: +```bash +kubectl get cmpv qdrant +``` +Expected Output: +```bash +NAME VERSIONS STATUS AGE +qdrant 1.14.0,1.10.0,1.8.4,1.8.1,1.7.3,1.5.0 Available 26d +``` + +Note: The list of supported versions may vary depending on your KubeBlocks version. + +## Upgrading the Qdrant Version + +### Check compatible versions for the same ComponentDefinition + +**Step 1.** Get the list of `ComponentDefinition` associated with a given `ComponentVersion` + +```bash +kubectl get cmpv qdrant -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n' +``` + +
+Example Output +```text +qdrant-1.0.0 +``` +
+ +**Step 2.** Get the list of `ComponentDefinition` associated with a given `ComponentVersion` + +```bash +kubectl get cmpv qdrant -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("^qdrant"))) | .releases[]' +``` + +This returns versions compatible with `ComponentDefinition` named `qdrant`: + +
+Example Output +```text +1.5.0 +1.7.3 +1.8.1 +1.8.4 +1.10.0 +1.14.0 +``` +
+ +### Apply the Upgrade + +To upgrade the Qdrant version, modify the serviceVersion field in the Cluster resource. In this example, we will upgrade the Qdrant version from `1.10.0` to `1.14.0` + + + + + + Option 1: Using OpsRequest + + You can upgrade the cluster using an OpsRequest: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-upgrade + namespace: demo + spec: + # Specifies the name of the Cluster resource that this operation is targeting. + clusterName: qdrant-cluster + type: Upgrade + upgrade: + components: + - componentName: qdrant + # Specifies the desired service version of component + serviceVersion: "1.14.0" + ``` + + + + Option 2: Using the Declarative Cluster API + + Alternatively, you may upgrade the cluster by setting the `spec.componentSpecs.serviceVersion` field in the cluster configuration: + + ```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: qdrant-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: qdrant + topology: cluster + componentSpecs: + - name: qdrant + serviceVersion: 1.14.0 # set to 1.14.0 for upgrading + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + + + +## Verification + + +### Monitor the Upgrade Process + +Check OpsRequest progress: +```bash +kubectl get ops -n demo qdrant-upgrade -w +``` + +Example Output: +``` +NAME TYPE CLUSTER STATUS PROGRESS AGE +qdrant-upgrade Upgrade qdrant-cluster Succeed 3/3 8m13s +``` + +Check pods: +``` +kubectl get pods -n demo -l app.kubernetes.io/instance=qdrant-cluster +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +qdrant-cluster-qdrant-0 2/2 Running 1 (7m23s ago) 13m +qdrant-cluster-qdrant-1 2/2 Running 1 (7m49s ago) 12m +qdrant-cluster-qdrant-2 2/2 Running 1 
(7m59s ago) 12m +``` + +**Key Observations:** +- Pods are not recreated, `RESTARTS` counter increased by one. +- Pods are updated one by one in pod ordinal order, from highest to lowest + + +### Check Cluster Status +Ensure the cluster is in the Running state: +```bash +kubectl get cluster qdrant-cluster -n demo -w +``` +Expected Output: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +qdrant-cluster qdrant Delete Running 17m +``` + +### Verify the Qdrant Version + +Connect to the upgraded instances and verify the Qdrant version: +```bash +kubectl exec -ti -n demo qdrant-cluster-qdrant-0 -c kbagent -- \ + curl http://127.0.0.1:6333 +``` + +Expected Output: +``` +curl http://127.0.0.1:6333 +{"title":"qdrant - vector search engine","version":"1.14.0","commit":"3617a0111fc8590c4adcc6e88882b63ca4dda9e7"}% +``` + +## Summary +In this guide, you learned how to: +- Deploy a Qdrant Cluster using KubeBlocks. +- Perform a rolling upgrade of the Qdrant minor version with minimal downtime. +- Verify that the upgrade was successful. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/09-decommission-a-specific-replica.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..f3480151 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,134 @@ +--- +title: Decommission a Specific Pod in KubeBlocks-Managed Qdrant Clusters +description: Learn how to decommission (take offline) a specific Pod in a Qdrant cluster managed by KubeBlocks. 
+keywords: [KubeBlocks, Qdrant, Decommission Pod, Horizontal Scaling, Kubernetes] +sidebar_position: 9 +sidebar_label: Decommission Qdrant Replica +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + +# Decommission a Specific Pod in KubeBlocks-Managed Qdrant Clusters + +This guide explains how to decommission (take offline) specific Pods in Qdrant clusters managed by KubeBlocks. Decommissioning provides precise control over cluster resources while maintaining availability. Use this for workload rebalancing, node maintenance, or addressing failures. + +## Why Decommission Pods with KubeBlocks? + +In traditional StatefulSet-based deployments, Kubernetes lacks the ability to decommission specific Pods. StatefulSets ensure the order and identity of Pods, and scaling down always removes the Pod with the highest ordinal number (e.g., scaling down from 3 replicas removes `Pod-2` first). This limitation prevents precise control over which Pod to take offline, which can complicate maintenance, workload distribution, or failure handling. + +KubeBlocks overcomes this limitation by enabling administrators to decommission specific Pods directly. This fine-grained control ensures high availability and allows better resource management without disrupting the entire cluster. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Qdrant Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Decommission a Pod + +**Expected Workflow**: + +1. Replica specified in `onlineInstancesToOffline` is removed +2. Pod terminates gracefully +3. 
Cluster transitions from `Updating` to `Running` + +To decommission a specific Pod (e.g., 'qdrant-cluster-qdrant-1'), you can use one of the following methods: + + + + + + Option 1: Using OpsRequest + + Create an OpsRequest to mark the Pod as offline: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-cluster-decommission-ops + namespace: demo + spec: + clusterName: qdrant-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: qdrant + scaleIn: + onlineInstancesToOffline: + - 'qdrant-cluster-qdrant-1' # Specifies the instance names that need to be taken offline + ``` + + #### Monitor the Decommissioning Process + Check the progress of the decommissioning operation: + + ```bash + kubectl get ops qdrant-cluster-decommission-ops -n demo -w + ``` + Example Output: + + ```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +qdrant-cluster-decommission-ops HorizontalScaling qdrant-cluster Running 0/1 8s +qdrant-cluster-decommission-ops HorizontalScaling qdrant-cluster Running 1/1 31s +qdrant-cluster-decommission-ops HorizontalScaling qdrant-cluster Succeed 1/1 31s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the Cluster resource directly to decommission the Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: qdrant + replicas: 2 # expected replicas after decommission + offlineInstances: + - qdrant-cluster-qdrant-1 # <----- Specify Pod to be decommissioned + ... 
+ ``` + + + + +### Verify the Decommissioning + +After applying the updated configuration, verify the remaining Pods in the cluster: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=qdrant-cluster +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +qdrant-cluster-qdrant-0 2/2 Running 0 25m +qdrant-cluster-qdrant-2 2/2 Running 0 24m +``` + +## Summary +Key takeaways: +- Traditional StatefulSets lack precise Pod removal control +- KubeBlocks enables targeted Pod decommissioning +- Two implementation methods: OpsRequest or Cluster API + +This provides granular cluster management while maintaining availability. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/_category_.yml new file mode 100644 index 00000000..5c4d52e0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/04-operations/_category_.yml @@ -0,0 +1,4 @@ +position: 4 +label: Operations +collapsible: true +collapsed: false \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/01-create-backuprepo.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/01-create-backuprepo.mdx new file mode 100644 index 00000000..c14f8d53 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/01-create-backuprepo.mdx @@ -0,0 +1,125 @@ +--- +title: Create a Backup Repository for KubeBlocks +description: Learn how to create and configure a BackupRepo for KubeBlocks using an S3 bucket to store backup data. +keywords: [KubeBlocks, Backup, BackupRepo, S3, Kubernetes] +sidebar_position: 1 +sidebar_label: Create BackupRepo +--- + +# Create a BackupRepo for KubeBlocks + +This guide walks you through creating and configuring a BackupRepo in KubeBlocks using an S3 bucket for storing backup data. + +## Prerequisites +- AWS CLI configured with appropriate permissions to create S3 buckets. 
+- kubectl access to your Kubernetes cluster. +- KubeBlocks installed ([installation guide](../user_docs/overview/install-kubeblocks)) and running in the kb-system namespace. + +## Step 1: Create S3 Bucket + +Use the AWS CLI to create an S3 bucket in your desired region. Replace `` with your target AWS region (e.g., `us-east-1`, `ap-southeast-1`). + +```bash + aws s3api create-bucket --bucket kubeblocks-backup-repo --region --create-bucket-configuration LocationConstraint= +``` + +Example (for us-west-1): +```bash +aws s3api create-bucket \ + --bucket kubeblocks-backup-repo \ + --region us-west-1 \ + --create-bucket-configuration LocationConstraint=us-west-1 +``` + +Example Output: + +```json +{ +"Location": "http://kubeblocks-backup-repo.s3.amazonaws.com/" +} +``` + +Verification: +Confirm the bucket was created by listing its contents (it will be empty initially): + +```bash +aws s3 ls s3://kubeblocks-backup-repo +``` + +## Step 2: Create a Kubernetes Secret for AWS Credentials + +Store your AWS credentials securely in a Kubernetes Secret. Replace `` and `` with your actual AWS credentials: + +```bash +# Create a secret to save the access key +kubectl create secret generic s3-credential-for-backuprepo \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= \ + -n kb-system +``` + +## Step 3: Configure Backup Repository + +A BackupRepo is a custom resource that defines a storage repository for backups. In this step, you'll integrate your S3 bucket with KubeBlocks by creating a BackupRepo resource. + +Apply the following YAML to create the BackupRepo. Replace fields(e.g., bucket name, region) with your specific settings. 
+ +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupRepo +metadata: + name: s3-repo + annotations: + # mark this backuprepo as default one + dataprotection.kubeblocks.io/is-default-repo: 'true' +spec: + # Currently, KubeBlocks supports configuring various object storage services as backup repositories + # - s3 (Amazon Simple Storage Service) + # - oss (Alibaba Cloud Object Storage Service) + # - cos (Tencent Cloud Object Storage) + # - gcs (Google Cloud Storage) + # - obs (Huawei Cloud Object Storage) + # - minio, and other S3-compatible services. + storageProviderRef: s3 + # Specifies the access method of the backup repository. + # - Tool + # - Mount + accessMethod: Tool + # Specifies reclaim policy of the PV created by this backup repository. + pvReclaimPolicy: Retain + # Specifies the capacity of the PVC created by this backup repository. + volumeCapacity: 100Gi + # Stores the non-secret configuration parameters for the StorageProvider. + config: + bucket: kubeblocks-backup-repo + endpoint: '' + mountOptions: --memory-limit 1000 --dir-mode 0777 --file-mode 0666 + region: us-west-1 + # References to the secret that holds the credentials for the StorageProvider. + credential: + # name is unique within a namespace to reference a secret resource. + name: s3-credential-for-backuprepo + # namespace defines the space within which the secret name must be unique. + namespace: kb-system +``` + +## Step 4: Verify Backup Repository Status + +Check the status of the BackupRepo to ensure it is correctly initialized: + +```bash +kubectl get backuprepo s3-repo -w +``` + +Expected Status Flow: +```bash +NAME STATUS STORAGEPROVIDER ACCESSMETHOD DEFAULT AGE +s3-repo PreChecking s3 Tool true 5s +s3-repo Ready s3 Tool true 35s +``` + +Troubleshooting: + - If status becomes Failed: + - Verify bucket name and region match your S3 configuration. + - Confirm AWS credentials in the Secret are correct. + - Check network connectivity between KubeBlocks and AWS S3. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/02-create-full-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/02-create-full-backup.mdx new file mode 100644 index 00000000..e2a00b64 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/02-create-full-backup.mdx @@ -0,0 +1,217 @@ +--- +title: Create a Full Backup for a Qdrant Cluster on KubeBlocks +description: Step-by-step guide to creating and validating full backups for Qdrant clusters using Backup API and OpsRequest API in KubeBlocks. +keywords: [Qdrant, Full Backup, KubeBlocks, Kubernetes, Database Backup, XtraBackup] +sidebar_position: 2 +sidebar_label: Create Full Backup +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Create a Full Backup for Qdrant on KubeBlocks + +This guide demonstrates how to create and validate full backups for Qdrant clusters on KubeBlocks using the `pg-basebackup` method through both: +- The Backup API (direct backup operations) +- The OpsRequest API (managed backup operations with enhanced monitoring) + +We will cover how to restore data from a backup in the [Restore From Full Backup](./05-restoring-from-full-backup) guide. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Qdrant Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Backup Prerequisites + +Before creating backups, ensure: +1. Backup repository is configured: + - `BackupRepo` resource exists + - Network connectivity between cluster and repository + - `BackupRepo` status shows "Ready" + +2. Cluster is ready: + - Cluster status is "Running" + - No ongoing operations (scaling, upgrades, etc.) 
+ +## Identify Backup Configuration + +Check available backup policies and schedules: + +```bash +# List backup policies +kubectl get backuppolicy -n demo -l app.kubernetes.io/instance=qdrant-cluster + +# List backup schedules +kubectl get backupschedule -n demo -l app.kubernetes.io/instance=qdrant-cluster +``` + +Expected Output: +```bash +NAME BACKUP-REPO STATUS AGE +qdrant-cluster-qdrant-backup-policy Available 36m + +NAME STATUS AGE +qdrant-cluster-qdrant-backup-schedule Available 36m +``` + +View supported backup methods in the BackupPolicy CR 'qdrant-cluster-qdrant-backup-policy': + +```bash +kubectl get backuppolicy qdrant-cluster-qdrant-backup-policy -n demo -oyaml | yq '.spec.backupMethods[].name' +``` +**List of Backup methods** + +KubeBlocks Qdrant supports these backup methods: + +| Feature | Method | Description | +|-------------|--------|------------| +| Full Backup | datafile | uses HTTP API `snapshot` to create snapshot for all collections. | + +## Backup via Backup API + +### 1. Create On-Demand Backup + +The `datafile` method backup the data files of the database + +Apply this manifest to create a backup: + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: Backup +metadata: + name: qdrant-backup-datafile + namespace: demo +spec: + # Specifies the backup method name that is defined in the backup policy. + # - datafile + backupMethod: datafile + # Specifies the backup policy to be applied for this backup. + backupPolicyName: qdrant-cluster-qdrant-backup-policy + # Determines whether the backup contents stored in the backup repository should be deleted when the backup custom resource(CR) is deleted. Supported values are `Retain` and `Delete`. + # - `Retain` means that the backup content and its physical snapshot on backup repository are kept. + # - `Delete` means that the backup content and its physical snapshot on backup repository are deleted. + deletionPolicy: Delete +``` + +### 2. 
Monitor Backup and Verify Completion + +Track progress until status shows "Completed": + +```bash +kubectl get backup qdrant-backup-datafile -n demo -w +``` + +Example Output: + +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +qdrant-backup-datafile qdrant-cluster-qdrant-backup-policy datafile Completed 0 10s Delete 2025-05-18T15:43:53Z 2025-05-18T15:44:02Z +``` + +### 3. Validate Backup + +Confirm successful completion by checking: +- Backup status shows "Completed" +- Backup size matches expectations +- Check files in the BackupRepo + +The `Backup` resource records details including: +- Storage path +- Time range +- Backup file size + + +## Backup via OpsRequest API + +### 1. Create On-Demand Backup + +Execute a backup using the OpsRequest API with the 'datafile' method: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: qdrant-cluster-backup + namespace: demo +spec: + clusterName: qdrant-cluster + force: false + backup: + backupPolicyName: qdrant-cluster-qdrant-backup-policy + backupMethod: datafile + deletionPolicy: Delete + retentionPeriod: 1mo + type: Backup +``` + +### 2. Monitor Backup Progress + +#### 1. Monitor Operation Status + +Track backup progress in real-time: +```bash +kubectl get ops qdrant-cluster-backup -n demo -w +``` + +Expected Output: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +qdrant-cluster-backup Backup qdrant-cluster Running -/- 5s +qdrant-cluster-backup Backup qdrant-cluster Succeed -/- 10s +``` + +- A STATUS of 'Succeed' indicates the backup operation completed successfully. + +#### 2. 
Verify Completion + +Check the final backup status: + +```bash +kubectl get backup -n demo -l operations.kubeblocks.io/ops-name=qdrant-cluster-backup +``` + +Example Output: +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +backup-demo-qdrant-cluster-20250518154515 qdrant-cluster-qdrant-backup-policy datafile Completed 0 10s Delete 2025-05-18T15:45:15Z 2025-05-18T15:45:25Z 2025-06-17T15:45:25Z +``` + +- The backup status should show 'Completed'. + +### 3. Validate Backup + +Confirm successful completion by checking: +- Backup status shows "Completed" +- Backup size matches expectations +- Files in the BackupRepo + +The `Backup` resource records details including: +- Storage path +- Time range +- Other metadata + +## Summary + +This guide covered: +1. Deploying a Qdrant cluster +2. Creating full backups using: + - Direct Backup API + - Managed OpsRequest API +3. Monitoring and validating backups + +Your Qdrant data is now securely backed up and ready for restoration when needed. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/03-scheduled-full-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/03-scheduled-full-backup.mdx new file mode 100644 index 00000000..961832a9 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/03-scheduled-full-backup.mdx @@ -0,0 +1,150 @@ +--- +title: Setting Up a Qdrant Cluster with Scheduled Backups in KubeBlocks +description: Learn how to deploy a Qdrant cluster using KubeBlocks and configure automated scheduled backups with retention in an S3 repository. 
+keywords: [Qdrant, Backup, KubeBlocks, Scheduled Backup, Kubernetes] +sidebar_position: 3 +sidebar_label: Scheduled Backups +--- + + +# Setting Up a Qdrant Cluster with Scheduled Backups in KubeBlocks + +This guide demonstrates how to deploy a Qdrant cluster using KubeBlocks and configure scheduled backups with retention in an S3 repository. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Qdrant Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Prerequisites for Backup + +1. Backup Repository Configured: + - Configured `BackupRepo` + - Network connectivity between cluster and repo, `BackupRepo` status is `Ready` + +2. Cluster is Running: + - Cluster must be in `Running` state + - No ongoing operations (scaling, upgrades etc.) + +## Configure Scheduled Backups + +KubeBlocks automatically creates a `BackupSchedule` resource when the cluster is created. Follow these steps to enable and configure scheduled backups: + +1. Verify the default backup schedule configuration: + +```bash +kubectl get backupschedule qdrant-cluster-qdrant-backup-schedule -n demo -oyaml +``` + +Example Output: +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupSchedule +spec: + backupPolicyName: qdrant-cluster-Qdrant-backup-policy + schedules: + - backupMethod: datafile + # ┌───────────── minute (0-59) + # │ ┌───────────── hour (0-23) + # │ │ ┌───────────── day of month (1-31) + # │ │ │ ┌───────────── month (1-12) + # │ │ │ │ ┌───────────── day of week (0-6) (Sunday=0) + # │ │ │ │ │ + # 0 18 * * * + # schedule this job every day at 6:00 PM (18:00). + cronExpression: 0 18 * * * # update the cronExpression to your need + enabled: true # set to `true` to schedule base backup periodically + retentionPeriod: 7d # set the retention period to your need +``` + +2. 
Enable and customize the backup schedule: +```bash +kubectl edit backupschedule qdrant-cluster-qdrant-backup-schedule -n demo +``` + +Update these key parameters: +- `enabled`: Set to `true` to activate scheduled backups +- `cronExpression`: Configure backup frequency using cron syntax +- `retentionPeriod`: Set how long to keep backups (e.g., `7d`, `1mo`) + +Example configuration for daily backups at 6PM UTC with 7-day retention: +```yaml +schedules: +- backupMethod: datafile + enabled: true + cronExpression: "0 18 * * *" + retentionPeriod: 7d +``` + +3. Verify the schedule configuration: +```bash +# Check schedule status +kubectl get backupschedule qdrant-cluster-qdrant-backup-schedule -n demo -w + +# View detailed configuration +kubectl describe backupschedule qdrant-cluster-qdrant-backup-schedule -n demo +``` + +## Monitoring and Managing Backups + +After enabling scheduled backups, monitor their execution and manage backup retention: + +1. View all backups: +```bash +kubectl get backup -n demo -l app.kubernetes.io/instance=qdrant-cluster +``` + +2. Inspect backup details: +```bash +kubectl describe backup -n demo +``` + +3. Verify backup artifacts: +- Status should show "Completed" +- Check backup size matches expectations +- Confirm retention period is being applied +- Validate backup files exist in repository + +4. Manage backup retention: +- To manually delete old backups: +```bash +kubectl delete backup -n demo +``` +- To modify retention period: +```bash +kubectl edit backupschedule qdrant-cluster-qdrant-backup-schedule -n demo +``` + +## Cleanup +To remove all created resources, delete the Qdrant cluster along with its namespace: + +```bash +kubectl delete cluster qdrant-cluster -n demo +kubectl delete ns demo +``` + +## Summary + +This guide demonstrated: +1. Configuration of automated Qdrant backups +2. Schedule customization using cron syntax +3. Retention policy management +4. 
Backup verification procedures + +Your Qdrant cluster now has: +- Regular automated backups +- Configurable retention policies +- Complete backup history tracking diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/05-restoring-from-full-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/05-restoring-from-full-backup.mdx new file mode 100644 index 00000000..25ec4e27 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/05-restoring-from-full-backup.mdx @@ -0,0 +1,161 @@ +--- +title: Restore a Qdrant Cluster from Backup +description: Learn how to restore a new Qdrant cluster from an existing backup in KubeBlocks using the Cluster Annotation or OpsRequest API. +keywords: [Qdrant, Restore, Backup, KubeBlocks, Kubernetes] +sidebar_position: 5 +sidebar_label: Restore Qdrant Cluster +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Restore a Qdrant Cluster from Backup + +This guide demonstrates two methods to restore a Qdrant cluster from backup in KubeBlocks: + +1. **Cluster Annotation Method** - Simple declarative approach using YAML annotations +2. **OpsRequest API Method** - Enhanced operational control with progress monitoring + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Preparing for Restoration: Locate one Full Backup +Before restoring, ensure that there is a full backup available. The restoration process will use this backup to create a new Qdrant cluster. + +- Backup repository accessible from new cluster +- Valid full backup in `Completed` state +- Adequate CPU/memory resources +- Sufficient storage capacity + +Find available full backups: + +```bash +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=qdrant-cluster # get the list of full backups +``` + +Pick ONE of the Backups whose status is `Completed`. 
+ +## Option 1: Cluster Annotation Restoration + +### Step 1: Create Restored Cluster +Create a new cluster with restore configuration: + +Key parameters: +- `kubeblocks.io/restore-from-backup` annotation +- Backup name and namespace located from the previous steps + + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: qdrant-cluster-restored + namespace: demo + annotations: + # NOTE: replace with your backup + kubeblocks.io/restore-from-backup: '{"qdrant":{"name":"","namespace":"demo","volumeRestorePolicy":"Parallel"}}' +spec: + terminationPolicy: Delete + clusterDef: qdrant + topology: cluster + componentSpecs: + - name: qdrant + serviceVersion: 1.10.0 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### Step 2: Monitor Restoration +Track restore progress with: + +```bash +# Watch restore status +kubectl get restore -n demo -w + +# Watch cluster status +kubectl get cluster -n demo -w +``` + +## Option 2: OpsRequest API Restoration + +### Step 1: Initiate Restore Operation +Create restore request via OpsRequest API: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: qdrant-cluster-restore + namespace: demo +spec: + clusterName: qdrant-cluster-restored + force: false + restore: + backupName: + backupNamespace: demo + type: Restore +``` + +### Step 2: Track Operation Progress +Monitor restore status: + +```bash +# Watch restore status +kubectl get restore -n demo -w + +# Watch cluster status +kubectl get cluster -n demo -w +``` + +### Step 3: Validate Restored Cluster +Confirm successful restoration: +```bash +kubectl get cluster qdrant-cluster-restored -n demo +``` +Example Output: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +qdrant-cluster-restored qdrant Delete 
Running 3m3s +``` + + +## Cleanup +To remove all created resources, delete the Qdrant cluster along with its namespace: + +```bash +kubectl delete cluster qdrant-cluster -n demo +kubectl delete cluster qdrant-cluster-restored -n demo +kubectl delete ns demo +``` + +## Summary + +This guide covered two restoration methods: + +1. **Cluster Annotation** - Simple YAML-based approach + - Retrieve system credentials + - Create cluster with restore annotation + - Monitor progress + +2. **OpsRequest API** - Enhanced operational control + - Create restore request + - Track operation status + - Verify completion diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/_category_.yml new file mode 100644 index 00000000..cd4faeaf --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/05-backup-restore/_category_.yml @@ -0,0 +1,4 @@ +position: 5 +label: Backup And Restores +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..1e28cd31 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,246 @@ +--- +title: Observability for Qdrant Clusters with the Prometheus Operator +description: Learn how to set up observability for Qdrant Clusters in KubeBlocks using the Prometheus Operator. Configure monitoring and visualize metrics with Grafana. 
+keywords: [KubeBlocks, Qdrant, Prometheus, Grafana, Observability, Metrics] +sidebar_position: 2 +sidebar_label: Observability for Qdrant Clusters +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Qdrant Monitoring with Prometheus Operator + +This guide demonstrates how to configure comprehensive monitoring for Qdrant clusters in KubeBlocks using: + +1. Prometheus Operator for metrics collection +2. Built-in Qdrant exporter for metrics exposure +3. Grafana for visualization + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Install Monitoring Stack + +### 1. Install Prometheus Operator +Deploy the kube-prometheus-stack using Helm: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. Verify Installation +Check all components are running: +```bash +kubectl get pods -n monitoring +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + + +## Deploy a Qdrant Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Configure Metrics Collection + +### 1. 
Verify Exporter Endpoint + +```bash +kubectl -n demo exec -it pods/qdrant-cluster-qdrant-0 -c kbagent -- \ + curl -s http://127.0.0.1:6333/metrics | head -n 50 +``` + +### 2. Create PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: qdrant-cluster-pod-monitor + namespace: demo + labels: # Must match the setting in 'prometheus.spec.podMonitorSelector' + release: prometheus +spec: + jobLabel: app.kubernetes.io/managed-by + # defines the labels which are transferred from the + # associated Kubernetes 'Pod' object onto the ingested metrics + # set the lables w.r.t you own needs + podTargetLabels: + - app.kubernetes.io/instance + - app.kubernetes.io/managed-by + - apps.kubeblocks.io/component-name + - apps.kubeblocks.io/pod-name + podMetricsEndpoints: + - path: /metrics + port: tcp-qdrant # Must match exporter port name + scheme: http + namespaceSelector: + matchNames: + - demo # Target namespace + selector: + matchLabels: + app.kubernetes.io/instance: qdrant-cluster +``` +**PodMonitor Configuration Guide** + +| Parameter | Required | Description | +|-----------|----------|-------------| +| `port` | Yes | Must match exporter port name ('http-metrics') | +| `namespaceSelector` | Yes | Targets namespace where Qdrant runs | +| `labels` | Yes | Must match Prometheus's podMonitorSelector | +| `path` | No | Metrics endpoint path (default: /metrics) | +| `interval` | No | Scraping interval (default: 30s) | + +## Verify Monitoring Setup + +### 1. Check Prometheus Targets +Forward and access Prometheus UI: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +Open your browser and navigate to: +http://localhost:9090/targets + +Check if there is a scrape job corresponding to the PodMonitor (the job name is 'demo/qdrant-cluster-pod-monitor'). + +Expected State: +- The State of the target should be UP. 
+- The target's labels should include the ones defined in podTargetLabels (e.g., 'app_kubernetes_io_instance'). + +### 2. Test Metrics Collection +Verify metrics are being scraped: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=up{app_kubernetes_io_instance="qdrant-cluster"}' | jq +``` + +Example Output: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "qdrant-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "qdrant", + "apps_kubeblocks_io_pod_name": "qdrant-cluster-qdrant-3", + "container": "qdrant", + "endpoint": "tcp-qdrant", + "instance": "10.244.0.64:6333", + "job": "kubeblocks", + "namespace": "demo", + "pod": "qdrant-cluster-qdrant-3" + }, + "value": [ + 1747583924.040, + "1" + ] + }, + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "qdrant-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "qdrant", + "apps_kubeblocks_io_pod_name": "qdrant-cluster-qdrant-0", + "container": "qdrant", + "endpoint": "tcp-qdrant", + "instance": "10.244.0.62:6333", + "job": "kubeblocks", + "namespace": "demo", + "pod": "qdrant-cluster-qdrant-0" + }, + "value": [ + 1747583924.040, + "1" + ] + }, + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "qdrant-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "qdrant", + "apps_kubeblocks_io_pod_name": "qdrant-cluster-qdrant-2", + "container": "qdrant", + "endpoint": "tcp-qdrant", + "instance": "10.244.0.60:6333", + "job": "kubeblocks", + "namespace": "demo", + "pod": "qdrant-cluster-qdrant-2" + }, + "value": [ + 1747583924.040, + "1" + ] + } + ] + } +} +``` +## Visualize in Grafana + +### 1. 
Access Grafana +Port-forward and login: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +Open your browser and navigate to http://localhost:3000. Use the default credentials to log in: +- Username: 'admin' +- Password: 'prom-operator' (default) + +### 2. Import Dashboard +Import the KubeBlocks Qdrant dashboard: + +1. In Grafana, navigate to "+" → "Import" +2. Choose one of these methods: + - Paste the dashboard URL: + `https://raw.githubusercontent.com/apecloud/kubeblocks-addons/main/addons/qdrant/dashboards/qdrant-overview.json` + - Or upload the JSON file directly + +![qdrant-monitoring-grafana-dashboard.png](/img/docs/en/qdrant-monitoring-grafana-dashboard.png) + + +## Delete +To delete all the created resources, run the following commands: +```bash +kubectl delete cluster qdrant-cluster -n demo +kubectl delete ns demo +kubectl delete podmonitor qdrant-cluster-pod-monitor -n demo +``` + +## Summary +In this tutorial, we set up observability for a Qdrant cluster in KubeBlocks using the Prometheus Operator. +By configuring a `PodMonitor`, we enabled Prometheus to scrape metrics from the Qdrant exporter. +Finally, we visualized these metrics in Grafana. This setup provides valuable insights for monitoring the health and performance of your Qdrant databases. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/08-monitoring/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-qdrant/08-monitoring/_category_.yml new file mode 100644 index 00000000..6953a6d9 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +position: 8 +label: Monitoring +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-qdrant/_category_.yml new file mode 100644 index 00000000..9d9a2d92 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/_category_.yml @@ -0,0 +1,4 @@ +position: 12 +label: KubeBlocks for Qdrant Community Edition +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_category_.yml new file mode 100644 index 00000000..753acb79 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_category_.yml @@ -0,0 +1,5 @@ +position: 100 +label: tpl +collapsible: true +collapsed: false +hidden: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_create-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_create-cluster.mdx new file mode 100644 index 00000000..586d332d --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_create-cluster.mdx @@ -0,0 +1,36 @@ +KubeBlocks uses a declarative approach for managing Qdrant Clusters. +Below is an example configuration for deploying a Qdrant Cluster with 3 replicas. 
+ +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: qdrant-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: qdrant + topology: cluster + componentSpecs: + - name: qdrant + serviceVersion: 1.10.0 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_prerequisites.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..e632dc41 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. 
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_verify-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_verify-cluster.mdx new file mode 100644 index 00000000..837c7256 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-qdrant/_tpl/_verify-cluster.mdx @@ -0,0 +1,33 @@ +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster qdrant-cluster -n demo -w +``` + +Expected Output: + +```bash +kubectl get cluster qdrant-cluster -n demo +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +qdrant-cluster qdrant Delete Creating 49s +qdrant-cluster qdrant Delete Running 62s +``` + +Check the pod status and roles: +```bash +kubectl get pods -l app.kubernetes.io/instance=qdrant-cluster -n demo +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +qdrant-cluster-qdrant-0 2/2 Running 0 1m43s +qdrant-cluster-qdrant-1 2/2 Running 0 1m28s +qdrant-cluster-qdrant-2 2/2 Running 0 1m14s +``` + +Once the cluster status becomes Running, your Qdrant cluster is ready for use. + +:::tip +If you are creating the cluster for the very first time, it may take some time to pull images before running. + +::: diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/01-overview.mdx b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/01-overview.mdx new file mode 100644 index 00000000..b84a1e4d --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/01-overview.mdx @@ -0,0 +1,51 @@ +--- +title: Overview of KubeBlocks RabbitMQ Addon +description: Learn about the features and capabilities of the KubeBlocks RabbitMQ addon, including deployment topologies, lifecycle management, backup and restore, and supported versions. 
+keywords: [RabbitMQ, KubeBlocks, operator, database, features, lifecycle management, backup, restore] +sidebar_position: 1 +sidebar_label: Overview +--- + +# Overview of KubeBlocks RabbitMQ Addon + +RabbitMQ is an open-source and lightweight message broker which supports multiple messaging protocols. + +## Key features + +### Lifecycle Management + +KubeBlocks simplifies RabbitMQ operations with comprehensive lifecycle management: + +| Feature | Description | +|------------------------------|-----------------------------------------------------------------------------| +| **Horizontal Scaling** | Scale replicas in/out to adjust capacity | +| **Vertical Scaling** | Adjust CPU/memory resources for RabbitMQ instances | +| **Volume Expansion** | Dynamically increase storage capacity without downtime | +| **Restart Operations** | Controlled cluster restarts with minimal disruption | +| **Start/Stop** | Temporarily suspend/resume cluster operations | +| **Password Management** | Ability to set and manage custom root password for the RabbitMQ cluster during creation | +| **Custom Services** | Expose specialized database endpoints | +| **Replica Management** | Safely decommission or rebuild specific replicas | +| **Version Upgrades** | Perform minor version upgrades seamlessly | +| **Advanced Scheduling** | Customize pod placement and resource allocation | +| **Monitoring** | Integrated Prometheus metrics collection | +| **Logging** | Centralized logs via Loki Stack | + +### Supported Versions + +KubeBlocks RabbitMQ Addon supports these RabbitMQ versions: + +| Major Version | Supported Minor Versions | +|---------------|--------------------------------| +| 3.8 | 3.8.14| +| 3.9 | 3.9.29| +| 3.10 | 3.10.25| +| 3.11 | 3.11.28| +| 3.12 | 3.12.14| +| 3.13 | 3.13.2, 3.13.7| +| 4.0 | 4.0.9| + +The list of supported versions can be found by following command: +```bash +kubectl get cmpv rabbitmq +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/02-quickstart.mdx 
b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/02-quickstart.mdx new file mode 100644 index 00000000..4501688c --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/02-quickstart.mdx @@ -0,0 +1,484 @@ +--- +title: RabbitMQ Quickstart +description: Comprehensive guide to deploying and managing RabbitMQ ReplicaSet Clusters with KubeBlocks, including installation, configuration, and operational best practices, an alternative to dedicated operator. +keywords: [Kubernetes Operator, RabbitMQ, KubeBlocks, Helm, Cluster Management, QuickStart] +sidebar_position: 2 +sidebar_label: Quickstart +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# RabbitMQ Quickstart + +This guide provides a comprehensive walkabout for deploying and managing RabbitMQ ReplicaSet Clusters using the **KubeBlocks RabbitMQ Add-on**, covering: +- System prerequisites and add-on installation +- Cluster creation and configuration +- Operational management including start/stop procedures +- Connection methods and cluster monitoring + +## Prerequisites + +### System Requirements + +Before proceeding, verify your environment meets these requirements: + +- A functional Kubernetes cluster (v1.21+ recommended) +- `kubectl` v1.21+ installed and configured with cluster access +- Helm installed ([installation guide](https://helm.sh/docs/intro/install/)) +- KubeBlocks installed ([installation guide](../user_docs/overview/install-kubeblocks)) + +### Verify RabbitMQ Add-on + +The RabbitMQ Add-on is included with KubeBlocks by default. Check its status: + +```bash +helm list -n kb-system | grep rabbitmq +``` + +
+Example Output: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-rabbitmq kb-system 1 2025-05-21 deployed rabbitmq-1.0.0 +``` +
+ +If the add-on isn't enabled, choose an installation method: + + + + + ```bash + # Add Helm repo + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # For users in Mainland China, if GitHub is inaccessible or slow, use this alternative repo: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # Update helm repo + helm repo update + # Search available Add-on versions + helm search repo kubeblocks/rabbitmq --versions + # Install your desired version (replace with your chosen version) + helm upgrade -i kb-addon-rabbitmq kubeblocks-addons/rabbitmq --version -n kb-system + ``` + + + + + ```bash + # Add an index (kubeblocks is added by default) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # Update the index + kbcli addon index update kubeblocks + # Update all indexes + kbcli addon index update --all + ``` + + To search and install an addon: + + ```bash + # Search Add-on + kbcli addon search rabbitmq + # Install Add-on with your desired version (replace with your chosen version) + kbcli addon install rabbitmq --version + ``` + **Example Output:** + ```bash + ADDON VERSION INDEX + rabbitmq 0.9.0 kubeblocks + rabbitmq 0.9.1 kubeblocks + rabbitmq 1.0.0 kubeblocks + ``` + To enable or disable an addon: + + ```bash + # Enable Add-on + kbcli addon enable rabbitmq + # Disable Add-on + kbcli addon disable rabbitmq + ``` + + + + +:::note +**Version Compatibility** + +Always verify that the RabbitMQ Add-on version matches your KubeBlocks major version to avoid compatibility issues. + +::: + +### Verify Supported RabbitMQ Versions + +**List available RabbitMQ versions:** + +```bash +kubectl get cmpv rabbitmq +``` +
+Example Output +```text +NAME VERSIONS STATUS AGE +rabbitmq 4.0.9,3.13.7,3.13.2,3.12.14,3.11.28,3.10.25,3.9.29,3.8.14 Available 26d +``` +
+ +**Check version compatibility for ComponentDefinitions** + +**Step 1.** Get the list of `ComponentDefinition` associated with a given `ComponentVersion` + +```bash +kubectl get cmpv rabbitmq -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n' +``` + +
+Example Output +```text +rabbitmq-1.0.0 +``` +
**Step 2.** Get the list of releases (service versions) compatible with a given `ComponentDefinition`
+Example Output +```text +4.0.9 +3.13.7 +3.13.2 +3.12.14 +3.11.28 +3.10.25 +3.9.29 +3.8.14 +``` +
+ +### Storage Configuration + +RabbitMQ requires persistent storage. Verify available options: + +```bash +kubectl get storageclass +``` + +Recommended storage characteristics: +- Minimum 20Gi capacity +- ReadWriteOnce access mode +- Supports volume expansion +- Appropriate performance for workload + +## Deploy a RabbitMQ Cluster + +Deploy a basic RabbitMQ Cluster with default settings: + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/rabbitmq/cluster.yaml +``` + +This creates: +- A RabbitMQ Cluster with 3 replicas. +- Default resource allocations (0.5 CPU, 0.5Gi memory) +- 20Gi persistent storage + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: rabbitmq-cluster + namespace: demo +spec: + # Specifies the behavior when a Cluster is deleted. + # Valid options are: [DoNotTerminate, Delete, WipeOut] (`Halt` is deprecated since KB 0.9) + # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. + # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. + # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. + terminationPolicy: Delete + # Specifies the name of the ClusterDefinition to use when creating a Cluster. + # Note: DO NOT UPDATE THIS FIELD + # The value must be `rabbitmq` to create a RabbitMQ Cluster + clusterDef: rabbitmq + # Specifies the name of the ClusterTopology to be used when creating the + # Cluster. + # Valid options are: [clustermode] + topology: clustermode + componentSpecs: + - name: rabbitmq + # ServiceVersion specifies the version of the Service expected to be + # provisioned by this Component. 
+ # Valid options are: [3.10.25,3.11.28,3.12.14,3.13.2,3.13.7,3.8.14,3.9.29] + serviceVersion: 3.13.7 + # Update `replicas` to your need. + # Recommended values are: [3,5,7] + replicas: 3 + # Specifies the resources required by the Component. + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + # Specifies a list of PersistentVolumeClaim templates that define the storage + # requirements for the Component. + volumeClaimTemplates: + # Refers to the name of a volumeMount defined in + # `componentDefinition.spec.runtime.containers[*].volumeMounts + - name: data + spec: + # The name of the StorageClass required by the claim. + # If not specified, the StorageClass annotated with + # `storageclass.kubernetes.io/is-default-class=true` will be used by default + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # Set the storage size as needed + storage: 20Gi +``` + +For more API fields and descriptions, refer to the [API Reference](../user_docs/references/api-reference/cluster). + +### Create a Version-Specific RabbitMQ Cluster + +To create a cluster with a specific version, configure `spec.componentSpecs.serviceVersion` (major.minor version) fields before applying it: + + + + ```yaml + componentSpecs: + - name: rabbitmq + serviceVersion: 4.0.9 + ``` + + + ```yaml + componentSpecs: + - name: rabbitmq + serviceVersion: 3.13.7 # Valid options: [3.13.7,3.13.2,3.12.14,3.11.28,3.10.25,3.9.29,3.8.14] + ``` + + + +:::note + +RabbitMQ needs `peer discovery` role to create events and get endpoints. This is essential for discovering other RabbitMQ nodes and forming a cluster. + +KubeBlocks will create SA with corresponding privilleges (Roles) on provisioning RabbitMQ Clusters. + +::: +## Verify Cluster Status + +When deploying a RabbitMQ Cluster with 3 replicas: + +Confirm successful deployment by checking: + +1. Cluster phase is `Running` +2. 
All pods are operational + +Check status using either method: + + + +```bash +kubectl get cluster rabbitmq-cluster -n demo -w +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +rabbitmq-cluster rabbitmq Delete Creating 27s +rabbitmq-cluster rabbitmq Delete Running 64s + +kubectl get pods -l app.kubernetes.io/instance=rabbitmq-cluster -n demo +NAME READY STATUS RESTARTS AGE +rabbitmq-cluster-rabbitmq-0 2/2 Running 0 92s +rabbitmq-cluster-rabbitmq-1 2/2 Running 0 77s +rabbitmq-cluster-rabbitmq-2 2/2 Running 0 63s +``` + + + + + With `kbcli` installed, you can view comprehensive cluster information: + +```bash +kbcli cluster describe rabbitmq-cluster -n demo + +Name: rabbitmq-cluster Created Time: May 18,2025 23:05 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo rabbitmq clustermode Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL +rabbitmq rabbitmq-cluster-rabbitmq.demo.svc.cluster.local:5672 + rabbitmq-cluster-rabbitmq.demo.svc.cluster.local:15672 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +rabbitmq 3.17.7 rabbitmq-cluster-rabbitmq-0 Running zone-x x.y.z May 18,2025 23:05 UTC+0800 +rabbitmq 3.17.7 rabbitmq-cluster-rabbitmq-1 Running zone-x x.y.z May 18,2025 23:06 UTC+0800 +rabbitmq 3.17.7 rabbitmq-cluster-rabbitmq-2 Running zone-x x.y.z May 18,2025 23:06 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +rabbitmq 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +rabbitmq rabbitmq-1.0.0 docker.io/library/rabbitmq:3.13.7-management + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +Show cluster events: kbcli cluster list-events -n demo rabbitmq-cluster +``` + + + + +## Access RabbitMQ Management Console + +**Retrieve credentials** +The user and password can be found in the cluster secrets named after 
`--account-`. In this case, the secret name is `rabbitmq-cluster-rabbitmq-account-root`. + +```bash +# get user name +NAME=$(kubectl get secrets -n demo rabbitmq-cluster-rabbitmq-account-root -o jsonpath='{.data.username}' | base64 -d) +# get password +PASSWD=$(kubectl get secrets -n demo rabbitmq-cluster-rabbitmq-account-root -o jsonpath='{.data.password}' | base64 -d) +``` + +**Port-forward Service** + +```bash +kubectl port-forward svc/rabbitmq-cluster-rabbitmq -ndemo 15672:15672 +``` + +**Access Management Console** + +Then log in to the RabbitMQ Management console at `http://:/` with the user and password. + +## Stop the RabbitMQ Cluster + +Stopping a cluster temporarily suspends operations while preserving all data and configuration: + +**Key Effects:** +- Compute resources (Pods) are released +- Persistent storage (PVCs) remains intact +- Service definitions are maintained +- Cluster configuration is preserved +- Operational costs are reduced + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/rabbitmq/stop.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-stop + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: Stop + ``` + + + + Alternatively, stop by setting `spec.componentSpecs.stop` to true: + + ```bash + kubectl patch cluster rabbitmq-cluster -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + } + ]' + ``` + + ```yaml + spec: + componentSpecs: + - name: rabbitmq + stop: true # Set to stop component + replicas: 3 + ``` + + + +## Start the RabbitMQ Cluster + +Restarting a stopped cluster resumes operations with all data and configuration intact. 
+ +**Key Effects:** +- Compute resources (Pods) are recreated +- Services become available again +- Cluster returns to previous state + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/rabbitmq/start.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-start + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: Start + ``` + + + + Restart by setting `spec.componentSpecs.stop` to false: + + ```bash + kubectl patch cluster rabbitmq-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } + ]' + ``` + + + + +## Delete RabbitMQ Cluster + +Choose carefully based on your data retention needs: + +| Policy | Resources Removed | Data Removed | Recommended For | +|-----------------|-------------------|--------------|-----------------| +| DoNotTerminate | None | None | Critical production clusters | +| Delete | All resources | PVCs deleted | Non-critical environments | +| WipeOut | All resources | Everything* | Test environments only | + +*Includes snapshots and backups in external storage + +**Pre-Deletion Checklist:** +1. Verify no applications are using the cluster +2. Ensure required backups exist +3. Confirm proper terminationPolicy is set +4. 
Check for dependent resources + +For test environments, use this complete cleanup: + +```bash +kubectl patch cluster rabbitmq-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster rabbitmq-cluster -n demo +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/01-stop-start-restart.mdx b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..b60c4d4d --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,281 @@ +--- +title: RabbitMQ Cluster Lifecycle Management (Stop, Start, Restart) +description: Learn how to manage RabbitMQ Cluster states in KubeBlocks including stopping, starting, and restarting operations to optimize resources. +keywords: [KubeBlocks, RabbitMQ, Cluster Management, Stop, Start, Restart] +sidebar_position: 1 +sidebar_label: Lifecycle Management +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# RabbitMQ Cluster Lifecycle Management + +This guide demonstrates how to manage a RabbitMQ Cluster's operational state in **KubeBlocks**, including: + +- Stopping the cluster to conserve resources +- Starting a stopped cluster +- Restarting cluster components + +These operations help optimize resource usage and reduce operational costs in Kubernetes environments. 
+ +Lifecycle management operations in KubeBlocks: + +| Operation | Effect | Use Case | +|-----------|--------|----------| +| Stop | Suspends cluster, retains storage | Cost savings, maintenance | +| Start | Resumes cluster operation | Restore service after pause | +| Restart | Recreates pods for component | Configuration changes, troubleshooting | + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a RabbitMQ Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Cluster Lifecycle Operations + +### Stopping the Cluster + +Stopping a RabbitMQ Cluster in KubeBlocks will: + +1. Terminates all running pods +2. Retains persistent storage (PVCs) +3. Maintains cluster configuration + +This operation is ideal for: +- Temporary cost savings +- Maintenance windows +- Development environment pauses + + + + + +Option 1: OpsRequest API + +Create a Stop operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: rabbitmq-cluster-stop-ops + namespace: demo +spec: + clusterName: rabbitmq-cluster + type: Stop +``` + + + + +Option 2: Cluster API Patch + +Modify the cluster spec directly by patching the stop field: + +```bash +kubectl patch cluster rabbitmq-cluster -n demo --type='json' -p='[ +{ + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true +} +]' +``` + + + + + +### Verifying Cluster Stop + +To confirm a successful stop operation: + +1. Check cluster status transition: + ```bash + kubectl get cluster rabbitmq-cluster -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + rabbitmq-cluster rabbitmq Delete Stopping 6m3s + rabbitmq-cluster rabbitmq Delete Stopped 6m55s + ``` + +2. 
Verify no running pods: + ```bash + kubectl get pods -l app.kubernetes.io/instance=rabbitmq-cluster -n demo + ``` + Example Output: + ```bash + No resources found in demo namespace. + ``` + +3. Confirm persistent volumes remain: + ```bash + kubectl get pvc -l app.kubernetes.io/instance=rabbitmq-cluster -n demo + ``` + Example Output: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE + data-rabbitmq-cluster-rabbitmq-0 Bound pvc-uuid 20Gi RWO 22m + data-rabbitmq-cluster-rabbitmq-1 Bound pvc-uuid 20Gi RWO 21m + data-rabbitmq-cluster-rabbitmq-2 Bound pvc-uuid 20Gi RWO 21m + ``` + +### Starting the Cluster + +Starting a stopped RabbitMQ Cluster: +1. Recreates all pods +2. Reattaches persistent storage +3. Restores service endpoints + +Expected behavior: +- Cluster returns to previous state +- No data loss occurs +- Services resume automatically + + + + +Initiate a Start operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: rabbitmq-cluster-start-ops + namespace: demo +spec: + # Specifies the name of the Cluster resource that this operation is targeting. + clusterName: rabbitmq-cluster + type: Start +``` + + + + + +Modify the cluster spec to resume operation: +1. Set stop: false, or +2. Remove the stop field entirely + ```bash + kubectl patch cluster rabbitmq-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } + ]' + ``` + + + + + +### Verifying Cluster Start + +To confirm a successful start operation: + +1. Check cluster status transition: + ```bash + kubectl get cluster rabbitmq-cluster -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + rabbitmq-cluster rabbitmq Delete Updating 24m + rabbitmq-cluster rabbitmq Delete Running 24m + rabbitmq-cluster rabbitmq Delete Running 24m + ``` + +2. 
Verify pod recreation: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=rabbitmq-cluster + ``` + Example Output: + ```bash + NAME READY STATUS RESTARTS AGE + rabbitmq-cluster-rabbitmq-0 2/2 Running 0 55s + rabbitmq-cluster-rabbitmq-1 2/2 Running 0 44s + rabbitmq-cluster-rabbitmq-2 2/2 Running 0 33s + ``` + +### Restarting Cluster + +Restart operations provide: +- Pod recreation without full cluster stop +- Component-level granularity +- Minimal service disruption + +Use cases: +- Configuration changes requiring restart +- Resource refresh +- Troubleshooting + +**Using OpsRequest API** + +Target specific components `rabbitmq` for restart: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: rabbitmq-cluster-restart-ops + namespace: demo +spec: + clusterName: rabbitmq-cluster + type: Restart + restart: + - componentName: rabbitmq +``` + +**Verifying Restart Completion** + +To verify a successful component restart: + +1. Track OpsRequest progress: + ```bash + kubectl get opsrequest rabbitmq-cluster-restart-ops -n demo -w + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + rabbitmq-cluster-restart-ops Restart rabbitmq-cluster Running 0/3 4s + rabbitmq-cluster-restart-ops Restart rabbitmq-cluster Running 1/3 28s + rabbitmq-cluster-restart-ops Restart rabbitmq-cluster Running 2/3 56s + rabbitmq-cluster-restart-ops Restart rabbitmq-cluster Running 2/3 109s + ``` + +2. Check pod status: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=rabbitmq-cluster + ``` + Note: Pods will show new creation timestamps after restart + +3. Verify component health: + ```bash + kbcli cluster describe rabbitmq-cluster -n demo + ``` + +Once the operation is complete, the cluster will return to the Running state. + +## Summary +In this guide, you learned how to: +1. Stop a RabbitMQ Cluster to suspend operations while retaining persistent storage. +2. 
Start a stopped cluster to bring it back online. +3. Restart specific cluster components to recreate their Pods without stopping the entire cluster. + +By managing the lifecycle of your RabbitMQ Cluster, you can optimize resource utilization, reduce costs, and maintain flexibility in your Kubernetes environment. KubeBlocks provides a seamless way to perform these operations, ensuring high availability and minimal disruption. diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/02-vertical-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..fd2f24bc --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,174 @@ +--- +title: Vertical Scaling in a RabbitMQ Cluster +description: Learn how to perform vertical scaling in a RabbitMQ Cluster managed by KubeBlocks to optimize resource utilization and improve performance. +keywords: [KubeBlocks, RabbitMQ, Vertical Scaling, Kubernetes, Resources] +sidebar_position: 2 +sidebar_label: Vertical Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Vertical Scaling for RabbitMQ Clusters with KubeBlocks + +This guide demonstrates how to vertically scale a RabbitMQ Cluster managed by KubeBlocks by adjusting compute resources (CPU and memory) while maintaining the same number of replicas. + +Vertical scaling modifies compute resources (CPU and memory) for RabbitMQ instances while maintaining replica count. 
Key characteristics: + +- **Non-disruptive**: When properly configured, maintains availability during scaling +- **Granular**: Adjust CPU, memory, or both independently +- **Reversible**: Scale up or down as needed + +KubeBlocks ensures minimal impact during scaling operations by following a controlled, role-aware update strategy: +**Role-Aware Replicas (Primary/Secondary Replicas)** +- Secondary replicas update first – Non-leader pods are upgraded to minimize disruption. +- Primary updates last – Only after all secondaries are healthy does the primary pod restart. +- Cluster state progresses from Updating → Running once all replicas are stable. + +**Role-Unaware Replicas (Ordinal-Based Scaling)** +If replicas have no defined roles, updates follow Kubernetes pod ordinal order: +- Highest ordinal first (e.g., pod-2 → pod-1 → pod-0) to ensure deterministic rollouts. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a RabbitMQ Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Vertical Scale + +**Expected Workflow**: + +1. Pods are updated in pod ordinal order, from highest to lowest, (e.g., pod-2 → pod-1 → pod-0) +1. 
Cluster status transitions from `Updating` to `Running` + + + + Option 1: Using VerticalScaling OpsRequest + + Apply the following YAML to scale up the resources for the rabbitmq component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-vscale-ops + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: VerticalScaling + verticalScaling: + - componentName: rabbitmq + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + + You can check the progress of the scaling operation with the following command: + + ```bash + kubectl -n demo get ops rabbitmq-cluster-vscale-ops -w + ``` + + Expected Result: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + rabbitmq-cluster-vscale-ops VerticalScaling rabbitmq-cluster Running 0/3 32s + rabbitmq-cluster-vscale-ops VerticalScaling rabbitmq-cluster Running 1/3 55s + rabbitmq-cluster-vscale-ops VerticalScaling rabbitmq-cluster Running 2/3 82s + rabbitmq-cluster-vscale-ops VerticalScaling rabbitmq-cluster Running 3/3 2m13s + ``` + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update `spec.componentSpecs.resources` field to the desired resources for vertical scale. + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: rabbitmq + replicas: 3 + resources: + requests: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + limits: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + ... 
+ ``` + + + +## Best Practices & Considerations + +**Planning:** +- Scale during maintenance windows or low-traffic periods +- Verify Kubernetes cluster has sufficient resources +- Check for any ongoing operations before starting + +**Execution:** +- Maintain balanced CPU-to-Memory ratios +- Set identical requests/limits for guaranteed QoS + +**Post-Scaling:** +- Monitor resource utilization and application performance +- Consider adjusting RabbitMQ parameters if needed + +## Verification +Verify the updated resources by inspecting the cluster configuration or Pod details: +```bash +kbcli cluster describe rabbitmq-cluster -n demo +``` + +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +rabbitmq 1 / 1 1Gi / 1Gi data:20Gi +``` + +## Key Benefits of Vertical Scaling with KubeBlocks +- Seamless Scaling: Pods are recreated in a specific order to ensure minimal disruption. +- Dynamic Resource Adjustments: Easily scale CPU and memory based on workload requirements. +- Flexibility: Choose between OpsRequest for dynamic scaling or direct API updates for precise control. +- Improved Availability: The cluster remains operational during the scaling process, maintaining high availability. + +## Cleanup +To remove all created resources, delete the RabbitMQ Cluster along with its namespace: +```bash +kubectl delete cluster rabbitmq-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +1. Deploy a RabbitMQ Cluster managed by KubeBlocks. +2. Perform vertical scaling by increasing or decreasing resources for the rabbitmq component. +3. Use both OpsRequest and direct Cluster API updates to adjust resource allocations. + +Vertical scaling is a powerful tool for optimizing resource utilization and adapting to changing workload demands, ensuring your RabbitMQ Cluster remains performant and resilient. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/03-horizontal-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..02fd1997 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,236 @@ +--- +title: Horizontal Scaling of RabbitMQ Clusters with KubeBlocks +description: Learn how to perform horizontal scaling (scale-out and scale-in) on a RabbitMQ cluster managed by KubeBlocks using OpsRequest and direct Cluster API updates. +keywords: [KubeBlocks, RabbitMQ, Horizontal Scaling, Scale-Out, Scale-In, Kubernetes] +sidebar_position: 3 +sidebar_label: Horizontal Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Horizontal Scaling for RabbitMQ Clusters with KubeBlocks + +This guide explains how to perform horizontal scaling (scale-out and scale-in) on a RabbitMQ cluster managed by KubeBlocks. You'll learn how to use both **OpsRequest** and direct **Cluster API** updates to achieve this. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a RabbitMQ Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + +## Scale-out (Add Replicas) + +**Expected Workflow**: + +1. New pod is provisioned, and transitions from `Pending` to `Running`. +2. Cluster status changes from `Updating` to `Running` + +:::note + +RabbitMQ quorum queue are designed based on the **Raft consensus algorithm**. +Better to have an odd number of replicas, such as 3, 5, 7, to avoid split-brain scenarios, after scaling out/in the cluster. 
+ +::: + + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale out the RabbitMQ cluster by adding 1 replica to rabbitmq component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-scale-out-ops + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: rabbitmq + # Specifies the replica changes for scaling in components + scaleOut: + # Specifies the replica changes for the component. + # add one more replica to current component + replicaChanges: 1 + ``` + + Monitor the progress of the scaling operation: + + ```bash + kubectl get ops rabbitmq-cluster-scale-out-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + rabbitmq-cluster-scale-out-ops HorizontalScaling rabbitmq-cluster Running 0/1 9s + rabbitmq-cluster-scale-out-ops HorizontalScaling rabbitmq-cluster Running 1/1 16s + rabbitmq-cluster-scale-out-ops HorizontalScaling rabbitmq-cluster Succeed 1/1 16s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: rabbitmq + replicas: 4 # increase replicas to scale-out + ... + ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster rabbitmq-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 4}]' + ``` + + + +### Verify Scale-Out + +After applying the operation, you will see a new pod created and the RabbitMQ cluster status goes from `Updating` to `Running`, and the newly created pod has a new role `secondary`. 
+ +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=rabbitmq-cluster +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +rabbitmq-cluster-rabbitmq-0 2/2 Running 0 6m24s +rabbitmq-cluster-rabbitmq-1 2/2 Running 0 7m19s +rabbitmq-cluster-rabbitmq-2 2/2 Running 0 5m57s +rabbitmq-cluster-rabbitmq-3 2/2 Running 0 3m54s +``` + +## Scale-in (Remove Replicas) + +**Expected Workflow**: + +1. Selected replica (the one with the largest ordinal) is removed +3. Pod is terminated gracefully +4. Cluster status changes from `Updating` to `Running` + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale in the RabbitMQ cluster by removing ONE replica: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-scale-in-ops + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: rabbitmq + # Specifies the replica changes for scaling in components + scaleIn: + # Specifies the replica changes for the component. 
+ # remove one replica from current component + replicaChanges: 1 + ``` + + Monitor progress: + ```bash + kubectl get ops rabbitmq-cluster-scale-in-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + rabbitmq-cluster-scale-in-ops HorizontalScaling rabbitmq-cluster Running 0/1 8s + rabbitmq-cluster-scale-in-ops HorizontalScaling rabbitmq-cluster Running 1/1 24s + rabbitmq-cluster-scale-in-ops HorizontalScaling rabbitmq-cluster Succeed 1/1 24s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: rabbitmq + replicas: 2 # decrease replicas to scale-in + ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster rabbitmq-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 2}]' + ``` + + + + +### Verify Scale-In + +Example Output (ONE Pod): +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=rabbitmq-cluster +NAME READY STATUS RESTARTS AGE +rabbitmq-cluster-rabbitmq-0 2/2 Running 0 18m +``` + +## Best Practices + +When performing horizontal scaling: +- Scale during low-traffic periods when possible +- Monitor cluster health during scaling operations +- Verify sufficient resources exist before scaling out +- Consider storage requirements for new replicas + +## Cleanup +To remove all created resources, delete the RabbitMQ cluster along with its namespace: +```bash +kubectl delete cluster rabbitmq-cluster -n demo +kubectl delete ns demo +``` + +## Summary +In this guide you learned how to: +- Perform scale-out operations to add replicas to a RabbitMQ cluster. +- Perform scale-in operations to remove replicas from a RabbitMQ cluster. +- Use both OpsRequest and direct Cluster API updates for horizontal scaling. 
KubeBlocks ensures seamless scaling with minimal disruption to your database operations.
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +### Check the Storage Class for Volume Expansion Support + +List all available storage classes and verify if volume expansion is supported by checking the `ALLOWVOLUMEEXPANSION` field: +```bash +kubectl get storageclass +``` + +Example Output: +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +Ensure the storage class you are using has `ALLOWVOLUMEEXPANSION` set to true. If it is false, the storage class does not support volume expansion. + +## Deploy a RabbitMQ Cluster with StorageClass + +KubeBlocks uses a declarative approach to manage RabbitMQ clusters. Below is an example configuration for deploying a RabbitMQ cluster with 3 replicas. + +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: rabbitmq-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: rabbitmq + topology: cluster + componentSpecs: + - name: rabbitmq + serviceVersion: 1.10.0 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + # specify storage class name supports Volume Expansion + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**Explanation of Key Fields** +- `storageClassName`: Specifies `StorageClass` name that supports volume expansion. If not set, the StorageClass annotated `default` will be used. + +:::note +**ALLOWVOLUMEEXPANSION** + +Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`) when creating cluster. 
+ +::: + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Expand volume + +:::note +1. Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`). +2. The new size must be larger than the current size. +3. Volume expansion may require additional configurations depending on the storage provider. +::: + +You can expand the volume in one of two ways: + + + + + Option 1: Using VolumeExpansion OpsRequest + + Apply the following YAML to increase the volume size for the rabbitmq component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-expand-volume-ops + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: VolumeExpansion + volumeExpansion: + - componentName: rabbitmq + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + Monitor the expansion progress with: + + ```bash + kubectl describe ops rabbitmq-cluster-expand-volume-ops -n demo + ``` + + Expected Result: + ```bash + Status: + Phase: Succeed + ``` + Once completed, the PVC size will be updated. + + :::note + If the storage class you use does not support volume expansion, this OpsRequest fails fast with information like: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update the `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` field to the desired size. + + ```yaml + componentSpecs: + - name: rabbitmq + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # specify new size, and make sure it is larger than current size + storage: 30Gi + ``` + KubeBlocks will automatically update the PVC size based on the new specifications. 
+ + + +## Verification + +Verify the updated cluster configuration: +```bash +kbcli cluster describe rabbitmq-cluster -n demo +``` +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +rabbitmq 500m / 500m 512Mi / 512Mi data:30Gi +``` +The volume size for the data PVC has been updated to the specified value (e.g., 30Gi in this case). + +Confirm PVC resizing completion: +```bash +kubectl get pvc -l app.kubernetes.io/instance=rabbitmq-cluster -n demo +``` +Expected Output: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +rabbitmq-cluster-rabbitmq-data-0 Bound pvc-uuid 30Gi RWO 33m +rabbitmq-cluster-rabbitmq-data-1 Bound pvc-uuid 30Gi RWO 33m +rabbitmq-cluster-rabbitmq-data-2 Bound pvc-uuid 30Gi RWO 33m +``` + +## Cleanup +To remove all created resources, delete the RabbitMQ cluster along with its namespace: +```bash +kubectl delete cluster rabbitmq-cluster -n demo +kubectl delete ns demo +``` + +## Summary + +In this guide you learned how to: +1. Verify storage class compatibility for volume expansion. +2. Perform volume expansion using either: + - OpsRequest for dynamic updates. + - Cluster API for manual updates. +3. Verify the updated PVC size and ensure the resize operation is complete. + +With volume expansion, you can efficiently scale your RabbitMQ cluster's storage capacity without service interruptions, ensuring your database can grow alongside your application needs. 
+ + diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/05-manage-loadbalancer.mdx b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..b70e1d2d --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,317 @@ +--- +title: Create and Destroy RabbitMQ Service Using the Declarative Cluster API in KubeBlocks +description: Learn how to configure and manage RabbitMQ services in KubeBlocks for external and internal access using LoadBalancer and other service types. +keywords: [KubeBlocks, RabbitMQ, LoadBalancer, External Service, Expose, Kubernetes] +sidebar_position: 5 +sidebar_label: Manage RabbitMQ Services +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Manage RabbitMQ Services Using the Declarative Cluster API in KubeBlocks + +This guide provides step-by-step instructions for exposing RabbitMQ services managed by KubeBlocks, both externally and internally. You'll learn to configure external access using cloud provider LoadBalancer services, manage internal services, and properly disable external exposure when no longer needed. 
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a RabbitMQ Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## View Network Services +List the Services created for the RabbitMQ cluster: +```bash +kubectl get service -l app.kubernetes.io/instance=rabbitmq-cluster -n demo +``` + +Example Services: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +rabbitmq-cluster-rabbitmq ClusterIP 10.96.6.67 5672/TCP,15672/TCP 33m +``` + +## Expose RabbitMQ Service + +External service addresses enable public internet access to RabbitMQ, while internal service addresses restrict access to the user's VPC. + +### Service Types Comparison + +| Type | Use Case | Cloud Cost | Security | +|------|----------|------------|----------| +| ClusterIP | Internal service communication | Free | Highest | +| NodePort | Development/testing | Low | Moderate | +| LoadBalancer | Production external access | High | Managed via security groups | + + + + + + + Option 1: Using OpsRequest + + To expose the RabbitMQ service externally using a LoadBalancer, create an OpsRequest resource: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: rabbitmq-cluster + expose: + - componentName: rabbitmq + services: + - name: internet + # Determines how the Service is exposed. Defaults to 'ClusterIP'. + # Valid options are 'ClusterIP', 'NodePort', and 'LoadBalancer'. + serviceType: LoadBalancer + ports: + - name: managment + port: 15672 + targetPort: management + # Contains cloud provider related parameters if ServiceType is LoadBalancer. 
+ # Following is an example for AWS EKS + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + switch: Enable + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops rabbitmq-cluster-expose-enable-ops -n demo + ``` + + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + rabbitmq-cluster-expose-enable-ops Expose rabbitmq-cluster Succeed 1/1 31s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the `spec.services` section in the Cluster resource to include a LoadBalancer service: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: rabbitmq-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: rabbitmq + # expose a external service + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + componentSelector: rabbitmq + name: rabbitmq-internet + serviceName: rabbitmq-internet + spec: # defines the behavior of a K8s service. + ipFamilyPolicy: PreferDualStack + ports: + - name: tcp-rabbitmq + # port to expose + port: 15672 # port 15672 for rabbitmq management console + protocol: TCP + targetPort: management + type: LoadBalancer + componentSpecs: + ... + ``` + The YAML configuration above adds a new external service under the services section. This LoadBalancer service includes annotations for AWS Network Load Balancer (NLB). + + :::note + Cloud Provider Annotations + + When using a LoadBalancer service, you must include the appropriate annotations specific to your cloud provider. 
Below is a list of commonly used annotations for different cloud providers: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # Restricts the LoadBalancer to internal VPC access only. Defaults to internet-facing if not specified. + cloud.google.com/l4-rbs: "enabled" # Optimization for internet-facing LoadBalancer + ``` + + - Alibaba Cloud + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # Use "intranet" for internal-facing LoadBalancer + ``` + ::: + + + :::note + The `service.beta.kubernetes.io/aws-load-balancer-internal` annotation controls whether the LoadBalancer is internal or internet-facing. Note that this annotation cannot be modified dynamically after service creation. + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # Use "true" for internal VPC IPs + ``` + If you change this annotation from "false" to "true" after the Service is created, the annotation may update in the Service object, but the LoadBalancer will still retain its public IP. + + To properly modify this behavior: + - First, delete the existing LoadBalancer service. + - Recreate the service with the updated annotation (`service.beta.kubernetes.io/aws-load-balancer-internal`: "true"). + - Wait for the new LoadBalancer to be provisioned with the correct internal or external IP. 
+ ::: + + + Wait for the Cluster status to transition to Running using the following command: + ```bash + kubectl get cluster rabbitmq-cluster -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + rabbitmq-cluster rabbitmq Delete Running 18m + ``` + + + + +### Verify the Exposed Service +Check the service details to confirm the LoadBalancer service is created: + +```bash +kubectl get service -l app.kubernetes.io/instance=rabbitmq-cluster -n demo +``` + +Example Output: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +rabbitmq-cluster-rabbitmq-internet LoadBalancer 172.20.60.24 15672:31243/TCP 1m +``` + +## Access RabbitMQ Management Console + +**Retrieve Credentials** + +KubeBlocks automatically creates a Secret containing the RabbitMQ root credentials. Retrieve the credentials: +```bash +NAME=`kubectl get secrets -n demo rabbitmq-cluster-rabbitmq-account-root -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo rabbitmq-cluster-rabbitmq-account-root -o jsonpath='{.data.password}' | base64 -d` +``` + +**Access Management Console** + +Then log in to the RabbitMQ Management console at `http://:/` with the user and password. 
+ + +## Disable External Exposure + + + + + + Option 1: Using OpsRequest + + To disable external access, create an OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-expose-disable-ops + namespace: demo + spec: + clusterName: rabbitmq-cluster + expose: + - componentName: rabbitmq + services: + - name: internet + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops rabbitmq-cluster-expose-disable-ops -n demo + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + rabbitmq-cluster-expose-disable-ops Expose rabbitmq-cluster Succeed 1/1 24s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, remove the `spec.services` field from the Cluster resource: + ```bash + kubectl patch cluster rabbitmq-cluster -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + Monitor the cluster status until it is Running: + ```bash + kubectl get cluster rabbitmq-cluster -n demo -w + ``` + + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + rabbitmq-cluster rabbitmq Delete Running 44m + ``` + + + +### Verify Service Removal + +Ensure that the 'rabbitmq-cluster-rabbitmq-internet' Service is removed: + +```bash +kubectl get service -l app.kubernetes.io/instance=rabbitmq-cluster -n demo +``` + +Expected Result: The 'rabbitmq-cluster-rabbitmq-internet' Service should be removed. + +## Cleanup +To remove all created resources, delete the RabbitMQ cluster along with its namespace: +```bash +kubectl delete cluster rabbitmq-cluster -n demo +kubectl delete ns demo +``` + +## Summary +This guide demonstrated how to: +- Expose a RabbitMQ service externally or internally using KubeBlocks. +- Configure LoadBalancer services with cloud provider-specific annotations. 
+- Manage external access by enabling or disabling services via OpsRequest or direct updates to the Cluster API. + +KubeBlocks provides flexibility and simplicity for managing MySQL services in Kubernetes environments. simplicity for managing RabbitMQ services in Kubernetes environments. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/09-decommission-a-specific-replica.mdx b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..c6f7aa54 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,151 @@ +--- +title: Decommission a Specific Pod in KubeBlocks-Managed RabbitMQ Clusters +description: Learn how to decommission (take offline) a specific Pod in a RabbitMQ cluster managed by KubeBlocks. +keywords: [KubeBlocks, RabbitMQ, Decommission Pod, Horizontal Scaling, Kubernetes] +sidebar_position: 9 +sidebar_label: Decommission RabbitMQ Replica +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + +# Decommission a Specific Pod in KubeBlocks-Managed RabbitMQ Clusters + +This guide explains how to decommission (take offline) specific Pods in RabbitMQ clusters managed by KubeBlocks. Decommissioning provides precise control over cluster resources while maintaining availability. Use this for workload rebalancing, node maintenance, or addressing failures. + +## Why Decommission Pods with KubeBlocks? + +In traditional StatefulSet-based deployments, Kubernetes lacks the ability to decommission specific Pods. StatefulSets ensure the order and identity of Pods, and scaling down always removes the Pod with the highest ordinal number (e.g., scaling down from 3 replicas removes `Pod-2` first). This limitation prevents precise control over which Pod to take offline, which can complicate maintenance, workload distribution, or failure handling. 
+ +KubeBlocks overcomes this limitation by enabling administrators to decommission specific Pods directly. This fine-grained control ensures high availability and allows better resource management without disrupting the entire cluster. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a RabbitMQ Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Decommission a Pod + +**Expected Workflow**: + +1. Replica specified in `onlineInstancesToOffline` is removed +2. Pod terminates gracefully +3. Cluster transitions from `Updating` to `Running` + + +Before decommissioning a specific pod from a component, make sure this component has more than one replicas. +If not, please scale out the component ahead. + +E.g. you can patch the cluster CR with command, to declare there are 3 replicas in component querynode. + +```bash +kubectl patch cluster milvus-cluster -n demo --type='json' -p='[ + { + "op": "replace", + "path": "/spec/componentSpecs/2/replicas", + "value": 3 + } +]' +``` + + +To decommission a specific Pod (e.g., 'rabbitmq-cluster-rabbitmq-1'), you can use one of the following methods: + + + + + + Option 1: Using OpsRequest + + Create an OpsRequest to mark the Pod as offline: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-decommission-ops + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: rabbitmq + scaleIn: + onlineInstancesToOffline: + - 'rabbitmq-cluster-rabbitmq-1' # Specifies the instance names that need to be taken offline + ``` + + #### Monitor the Decommissioning Process + Check the progress of the decommissioning operation: + + ```bash + kubectl get ops rabbitmq-cluster-decommission-ops -n demo -w + ``` + Example Output: + + ```bash +NAME TYPE CLUSTER STATUS 
PROGRESS AGE +rabbitmq-cluster-decommission-ops HorizontalScaling rabbitmq-cluster Running 0/1 8s +rabbitmq-cluster-decommission-ops HorizontalScaling rabbitmq-cluster Running 1/1 31s +rabbitmq-cluster-decommission-ops HorizontalScaling rabbitmq-cluster Succeed 1/1 31s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the Cluster resource directly to decommission the Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: rabbitmq + replicas: 2 # expected replicas after decommission + offlineInstances: + - rabbitmq-cluster-rabbitmq-1 # <----- Specify Pod to be decommissioned + ... + ``` + + + + +### Verify the Decommissioning + +After applying the updated configuration, verify the remaining Pods in the cluster: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=rabbitmq-cluster +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +rabbitmq-cluster-rabbitmq-0 2/2 Running 0 25m +rabbitmq-cluster-rabbitmq-2 2/2 Running 0 24m +``` + +## Summary +Key takeaways: +- Traditional StatefulSets lack precise Pod removal control +- KubeBlocks enables targeted Pod decommissioning +- Two implementation methods: OpsRequest or Cluster API + +This provides granular cluster management while maintaining availability. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/_category_.yml new file mode 100644 index 00000000..5c4d52e0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/04-operations/_category_.yml @@ -0,0 +1,4 @@ +position: 4 +label: Operations +collapsible: true +collapsed: false \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..4894ea5b --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,226 @@ +--- +title: Observability for RabbitMQ Clusters with the Prometheus Operator +description: Learn how to set up observability for RabbitMQ Clusters in KubeBlocks using the Prometheus Operator. Configure monitoring and visualize metrics with Grafana. +keywords: [KubeBlocks, RabbitMQ, Prometheus, Grafana, Observability, Metrics] +sidebar_position: 2 +sidebar_label: Observability for RabbitMQ Clusters +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# RabbitMQ Monitoring with Prometheus Operator + +This guide demonstrates how to configure comprehensive monitoring for RabbitMQ clusters in KubeBlocks using: + +1. Prometheus Operator for metrics collection +2. Built-in RabbitMQ exporter for metrics exposure +3. Grafana for visualization + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Install Monitoring Stack + +### 1. 
Install Prometheus Operator +Deploy the kube-prometheus-stack using Helm: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. Verify Installation +Check all components are running: +```bash +kubectl get pods -n monitoring +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + + +## Deploy a RabbitMQ Cluster + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## Configure Metrics Collection + +### 1. Verify Exporter Endpoint + +```bash +# prot-forward +kubectl -n demo port-forward pods/rabbitmq-cluster-rabbitmq-0 15692:15692 +# check metrics +curl -s http://127.0.0.1:15692/metrics | head -n 50 +``` + +### 2. 
+ Create PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: rabbitmq-cluster-pod-monitor + namespace: demo + labels: # Must match the setting in 'prometheus.spec.podMonitorSelector' + release: prometheus +spec: + jobLabel: app.kubernetes.io/managed-by + # defines the labels which are transferred from the + # associated Kubernetes 'Pod' object onto the ingested metrics + # set the labels according to your own needs + podTargetLabels: + - app.kubernetes.io/instance + - app.kubernetes.io/managed-by + - apps.kubeblocks.io/component-name + - apps.kubeblocks.io/pod-name + podMetricsEndpoints: + - path: /metrics + port: prometheus # Must match exporter port name + scheme: http + namespaceSelector: + matchNames: + - demo # Target namespace + selector: + matchLabels: + app.kubernetes.io/instance: rabbitmq-cluster +``` +**PodMonitor Configuration Guide** + +| Parameter | Required | Description | +|-----------|----------|-------------| +| `port` | Yes | Must match exporter port name ('prometheus') | +| `namespaceSelector` | Yes | Targets namespace where RabbitMQ runs | +| `labels` | Yes | Must match Prometheus's podMonitorSelector | +| `path` | No | Metrics endpoint path (default: /metrics) | +| `interval` | No | Scraping interval (default: 30s) | + +## Verify Monitoring Setup + +### 1. Check Prometheus Targets +Forward and access Prometheus UI: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +Open your browser and navigate to: +http://localhost:9090/targets + +Check if there is a scrape job corresponding to the PodMonitor (the job name is 'demo/rabbitmq-cluster-pod-monitor'). + +Expected State: +- The State of the target should be UP. +- The target's labels should include the ones defined in podTargetLabels (e.g., 'app_kubernetes_io_instance'). + +### 2. 
Test Metrics Collection +Verify metrics are being scraped: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=up{app_kubernetes_io_instance="rabbitmq-cluster"}' | jq +``` + +Example Output: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "rabbitmq-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "rabbitmq", + "apps_kubeblocks_io_pod_name": "rabbitmq-cluster-rabbitmq-0", + "container": "rabbitmq", + "endpoint": "prometheus", + "instance": "10.244.0.78:15692", + "job": "kubeblocks", + "namespace": "demo", + "pod": "rabbitmq-cluster-rabbitmq-0" + }, + "value": [ + 1747622160.396, + "1" + ] + }, + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "rabbitmq-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "rabbitmq", + "apps_kubeblocks_io_pod_name": "rabbitmq-cluster-rabbitmq-1", + "container": "rabbitmq", + "endpoint": "prometheus", + "instance": "10.244.0.80:15692", + "job": "kubeblocks", + "namespace": "demo", + "pod": "rabbitmq-cluster-rabbitmq-1" + }, + "value": [ + 1747622160.396, + "1" + ] + } + ] + } +} +``` +## Visualize in Grafana + +### 1. Access Grafana +Port-forward and login: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +Open your browser and navigate to http://localhost:3000. Use the default credentials to log in: +- Username: 'admin' +- Password: 'prom-operator' (default) + +### 2. Import Dashboard +Import the KubeBlocks RabbitMQ dashboard: + +1. In Grafana, navigate to "+" → "Import" +2. Import dashboard from [Grafana RabbitMQ-Overview](https://grafana.com/grafana/dashboards/10991-rabbitmq-overview/). 
+ +![rabbitmq-monitoring-grafana-dashboard.png](/img/docs/en/rabbitmq-monitoring-grafana-dashboard.png) + + +## Delete +To delete all the created resources, run the following commands: +```bash +kubectl delete cluster rabbitmq-cluster -n demo +kubectl delete ns demo +kubectl delete podmonitor rabbitmq-cluster-pod-monitor -n demo +``` + +## Summary +In this tutorial, we set up observability for a RabbitMQ cluster in KubeBlocks using the Prometheus Operator. +By configuring a `PodMonitor`, we enabled Prometheus to scrape metrics from the RabbitMQ exporter. +Finally, we visualized these metrics in Grafana. This setup provides valuable insights for monitoring the health and performance of your RabbitMQ databases. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/08-monitoring/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/08-monitoring/_category_.yml new file mode 100644 index 00000000..6953a6d9 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +position: 8 +label: Monitoring +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_category_.yml new file mode 100644 index 00000000..2893cd58 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_category_.yml @@ -0,0 +1,4 @@ +position: 12 +label: KubeBlocks for RabbitMQ Community Edition +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_category_.yml new file mode 100644 index 00000000..753acb79 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_category_.yml @@ -0,0 +1,5 @@ +position: 100 +label: tpl +collapsible: true +collapsed: false +hidden: true \ No newline at end of file diff --git 
a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_create-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_create-cluster.mdx new file mode 100644 index 00000000..a39e8cf0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_create-cluster.mdx @@ -0,0 +1,36 @@ +KubeBlocks uses a declarative approach for managing RabbitMQ Clusters. +Below is an example configuration for deploying a RabbitMQ Cluster with 3 replicas. + +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: rabbitmq-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: rabbitmq + topology: clustermode + componentSpecs: + - name: rabbitmq + serviceVersion: 3.13.7 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_prerequisites.mdx b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..e632dc41 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. 
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_verify-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_verify-cluster.mdx new file mode 100644 index 00000000..c3240704 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-rabbitmq/_tpl/_verify-cluster.mdx @@ -0,0 +1,33 @@ +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster rabbitmq-cluster -n demo -w +``` + +Expected Output: + +```bash +kubectl get cluster rabbitmq-cluster -n demo +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +rabbitmq-cluster rabbitmq Delete Creating 15s +rabbitmq-cluster rabbitmq Delete Running 83s +``` + +Check the pod status and roles: +```bash +kubectl get pods -l app.kubernetes.io/instance=rabbitmq-cluster -n demo +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +rabbitmq-cluster-rabbitmq-0 2/2 Running 0 106s +rabbitmq-cluster-rabbitmq-1 2/2 Running 0 82s +rabbitmq-cluster-rabbitmq-2 2/2 Running 0 47s +``` + +Once the cluster status becomes Running, your RabbitMQ cluster is ready for use. + +:::tip +If you are creating the cluster for the very first time, it may take some time to pull images before running. + +::: diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/01-overview.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/01-overview.mdx new file mode 100644 index 00000000..ecb1b9e3 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/01-overview.mdx @@ -0,0 +1,68 @@ +--- +title: Overview of KubeBlocks Redis Addon +description: Learn about the features and capabilities of the KubeBlocks Redis addon, including deployment topologies, lifecycle management, backup and restore, and supported versions. 
+keywords: [Redis, KubeBlocks, operator, database, features, lifecycle management, backup, restore] +sidebar_position: 1 +sidebar_label: Overview +--- + +# Overview of KubeBlocks Redis Addon + +Redis is an open source (BSD licensed), in-memory data structure store, used as a database, cache and message broker. This example shows how it can be managed in Kubernetes with KubeBlocks. + + +## Key Features + +### Supported Topologies + +| Topology | Data Distribution | Scalability | High Availability | Use Cases | +|---------------|-------------------|-------------|--------------------|-------------------------------| +| **Standalone**| Single node | No | No | Development/testing, small datasets | +| **Replication** with sentinel | Primary-Secondary replication | Read scaling | Yes | Read-heavy workloads, data redundancy needed | +| **Cluster** | Sharded storage | Read/write scaling | Yes | Large datasets, high-concurrency production environments | + +### Lifecycle Management + +KubeBlocks simplifies Redis operations with comprehensive lifecycle management: + +| Feature | Description | +|------------------------------|-----------------------------------------------------------------------------| +| **Horizontal Scaling** | Scale replicas in/out to adjust capacity | +| **Vertical Scaling** | Adjust CPU/memory resources for Redis instances | +| **Volume Expansion** | Dynamically increase storage capacity without downtime | +| **Restart Operations** | Controlled cluster restarts with minimal disruption | +| **Start/Stop** | Temporarily suspend/resume cluster operations | +| **Password Management** | Ability to set and manage custom root password for the Redis cluster during creation | +| **Dynamic Configuration** | Modify Redis parameters without restarting | +| **Custom Services** | Expose specialized database endpoints | +| **Switchover** | Planned primary-replica role changes | +| **Replica Management** | Safely decommission or rebuild specific replicas | +| **Version 
Upgrades** | Perform minor version upgrades seamlessly | +| **Advanced Scheduling** | Customize pod placement and resource allocation | +| **TLS Encryption** | Enable/disable transport layer security | +| **Monitoring** | Integrated Prometheus metrics collection | +| **Logging** | Centralized logs via Loki Stack | + + +### Backup and Restore + +KubeBlocks supports multiple backup strategies for Redis: + +| Feature | Method | Description | +|-------------|--------|------------| +| Full Backup | datafile | Uses `redis-cli BGSAVE` command to backup data | +| Continuous Backup | aof | Continuously perform incremental backups by archiving Append-Only Files (AOF) | + +### Supported Versions + +KubeBlocks Redis Addon supports these Redis versions: + +| Major Version | Supported Minor Versions | +|---------------|--------------------------------| +| 7.0 | 7.0.6 | +| 7.2 | 7.2.4, 7.2.7 | + +The list of supported versions can be found by following command: +```bash +kubectl get cmpv redis +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/02-quickstart.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/02-quickstart.mdx new file mode 100644 index 00000000..53568939 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/02-quickstart.mdx @@ -0,0 +1,557 @@ +--- +title: Redis Quickstart +description: Comprehensive guide to deploying and managing Redis Replication Clusters with KubeBlocks, including installation, configuration, and operational best practices, an alternative to dedicated operator. 
+keywords: [Kubernetes Operator, Redis, KubeBlocks, Helm, Cluster Management, QuickStart] +sidebar_position: 2 +sidebar_label: Quickstart +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Redis Quickstart + +This guide provides a comprehensive walkthrough for deploying and managing Redis Replication Clusters using the **KubeBlocks Redis Add-on**, covering: +- System prerequisites and add-on installation +- Cluster creation and configuration +- Operational management including start/stop procedures +- Connection methods and cluster monitoring + +## Prerequisites + +### System Requirements + +Before proceeding, verify your environment meets these requirements: + +- A functional Kubernetes cluster (v1.21+ recommended) +- `kubectl` v1.21+ installed and configured with cluster access +- Helm installed ([installation guide](https://helm.sh/docs/intro/install/)) +- KubeBlocks installed ([installation guide](../user_docs/overview/install-kubeblocks)) + +### Verify Redis Add-on + +The Redis Add-on is included with KubeBlocks by default. Check its status: + +```bash +helm list -n kb-system | grep redis +``` + +
+Example Output: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-redis kb-system 1 2025-05-21 deployed redis-1.0.0 +``` +
+ +If the add-on isn't enabled, choose an installation method: + + + + + ```bash + # Add Helm repo + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # For users in Mainland China, if GitHub is inaccessible or slow, use this alternative repo: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # Update helm repo + helm repo update + # Search available Add-on versions + helm search repo kubeblocks/redis --versions + # Install your desired version (replace with your chosen version) + helm upgrade -i kb-addon-redis kubeblocks-addons/redis --version -n kb-system + ``` + + + + + ```bash + # Add an index (kubeblocks is added by default) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # Update the index + kbcli addon index update kubeblocks + # Update all indexes + kbcli addon index update --all + ``` + + To search and install an addon: + + ```bash + # Search Add-on + kbcli addon search redis + # Install Add-on with your desired version (replace with your chosen version) + kbcli addon install redis --version + ``` + **Example Output:** + ```bash + ADDON VERSION INDEX + redis 0.9.0 kubeblocks + redis 0.9.1 kubeblocks + redis 1.0.0 kubeblocks + ``` + To enable or disable an addon: + + ```bash + # Enable Add-on + kbcli addon enable redis + # Disable Add-on + kbcli addon disable redis + ``` + + + + +:::note +**Version Compatibility** + +Always verify that the Redis Add-on version matches your KubeBlocks major version to avoid compatibility issues. + +::: + +### Verify Supported Redis Versions + +**List available Redis versions:** + +```bash +kubectl get cmpv redis +``` +
+Example Output +```text +NAME VERSIONS STATUS AGE +redis 7.2.7,7.2.4,7.0.6 Available 33d +``` +
+ +**Check version compatibility for ComponentDefinitions** + +**Step 1.** Get the list of `ComponentDefinition` associated with a given `ComponentVersion` + +```bash +kubectl get cmpv redis -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n' +``` + +
+Example Output +```text +redis-7-1.0.0 +``` +
+ +**Step 2.** Get the list of `ComponentDefinition` associated with a given `ComponentVersion` + +```bash +kubectl get cmpv redis -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("^redis-7"))) | .releases[]' +``` + +This returns versions compatible with `ComponentDefinition` named `redis-14`: + +
+Example Output +```text +7.2.7 +7.2.4 +7.0.6 +``` +
+ +### Storage Configuration + +Redis requires persistent storage. Verify available options: + +```bash +kubectl get storageclass +``` + +Recommended storage characteristics: +- Minimum 20Gi capacity +- ReadWriteOnce access mode +- Supports volume expansion +- Appropriate performance for workload + +## Deploy a Redis ReplicationCluster + +Deploy a basic Redis Replication Cluster with default settings: + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/redis/cluster.yaml +``` + +This creates: +- A Redis Replication Cluster with two components, Redis(2 replicas) and Redis Sentinel(3 replicas). +- Default resource allocations (0.5 CPU, 0.5Gi memory) +- 20Gi persistent storage +- Automatic primary-replica configuration + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-replication + namespace: demo +spec: + # Specifies the behavior when a Cluster is deleted. + # Valid options are: [DoNotTerminate, Delete, WipeOut] (`Halt` is deprecated since KB 0.9) + # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. + # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. + # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. + # This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. + terminationPolicy: Delete + # Specifies the name of the ClusterDefinition to use when creating a Cluster. + # Note: DO NOT UPDATE THIS FIELD + # The value must be `redis` to create a Redis Cluster + clusterDef: redis + # Specifies the name of the ClusterTopology to be used when creating the + # Cluster. 
+ topology: replication + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + # Determines whether metrics exporter information is annotated on the + # Component's headless Service. + # Valid options are [true, false] + disableExporter: false + # Specifies the desired number of replicas in the Component + replicas: 2 + # Specifies the resources required by the Component. + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # Set the storage size as needed + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + # Specifies the resources required by the Component. + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # Set the storage size as needed + storage: 20Gi +``` + +For more API fields and descriptions, refer to the [API Reference](../user_docs/references/api-reference/cluster). + +### Create a Version-Specific Redis Replication Cluster + +To create a cluster with a specific version, configure `spec.componentSpecs.serviceVersion` (major.minor version) fields before applying it: + + + + ```yaml + componentSpecs: + - name: redis + serviceVersion: 7.2.4 # Valid options: [7.0.6, 7.2.4, 7.2.7] + ``` + + + +## Verify Cluster Status + +When deploying a Redis Replication Cluster with 5 replicas, 2 for redis and 3 for redis sentinel: +- Redis runs with one Primary replica (read/write operations) and one Secondary replica (read-only operations) + +Confirm successful deployment by checking: + +1. Cluster phase is `Running` +2. All pods are operational +3. 
Replicas have correct roles + +Check status using either method: + + + +```bash +kubectl get cluster redis-replication -n demo +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +redis-replication redis Delete Running 3m49s + +kubectl get pods -l app.kubernetes.io/instance=redis-replication -L kubeblocks.io/role -n demo +NAME READY STATUS RESTARTS AGE ROLE +redis-replication-redis-0 3/3 Running 0 3m38s primary +redis-replication-redis-1 3/3 Running 0 3m16s secondary +redis-replication-redis-sentinel-0 2/2 Running 0 4m35s +redis-replication-redis-sentinel-1 2/2 Running 0 4m17s +redis-replication-redis-sentinel-2 2/2 Running 0 3m59s +``` + + + + + With `kbcli` installed, you can view comprehensive cluster information: + +```bash +kbcli cluster describe redis-replication -n demo + +Name: redis-replication Created Time: May 17,2025 15:45 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo redis replication Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL +redis redis-replication-redis-redis.demo.svc.cluster.local:6379 +redis-sentinel redis-replication-redis-sentinel-redis-sentinel.demo.svc.cluster.local:26379 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +redis 7.2.4 redis-replication-redis-0 primary Running zone-x x.y.z MM/DD +redis 7.2.4 redis-replication-redis-1 secondary Running zone-x x.y.z MM/DD +redis-sentinel 7.2.7 redis-replication-redis-sentinel-0 Running zone-x x.y.z MM/DD +redis-sentinel 7.2.7 redis-replication-redis-sentinel-1 Running zone-x x.y.z MM/DD +redis-sentinel 7.2.7 redis-replication-redis-sentinel-2 Running zone-x x.y.z MM/DD + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +redis 500m / 500m 512Mi / 512Mi data:20Gi +redis-sentinel 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +redis redis-7-1.0.0 docker.io/redis/redis-stack-server:7.2.0-v10 + 
docker.io/apecloud/agamotto:0.1.2-beta.1 + docker.io/redis/redis-stack-server:7.2.0-v14 +redis-sentinel redis-sentinel-7-1.0.0 docker.io/redis/redis-stack-server:7.2.0-v14 + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +Show cluster events: kbcli cluster list-events -n demo redis-replication +``` + + + + +## Access the Redis Replication Cluster + +KubeBlocks automatically provisions: +1. Credentials stored in Secret `redis-replication-redis-account-default` +2. ClusterIP Service `redis-replication-redis-redis` + +### Retrieve Credentials +```bash +# Get username +NAME=$(kubectl get secret -n demo redis-replication-redis-account-default -o jsonpath='{.data.username}' | base64 --decode) +# Get password +PASSWD=$(kubectl get secret -n demo redis-replication-redis-account-default -o jsonpath='{.data.password}' | base64 --decode) +``` + +### Connection Methods + + + + + Connect directly to a pod: + ```bash + kubectl exec -ti -n demo redis-replication-redis-0 -- \ + redis-cli -h redis-replication-redis-redis -a ${PASSWD} + ``` + + + + + 1. Forward service port: + ```bash + kubectl port-forward svc/redis-replication-redis-redis 6379:6379 -n demo + ``` + + 2. Connect via localhost: + ```bash + redis-cli -h 127.0.0.1 -a ${PASSWD} + ``` + + + +:::note +**Production Considerations** + +For production environments, avoid using `kubectl exec` and `port-forward`. 
Instead implement: +- LoadBalancer or NodePort Services for external access +- Network policies to restrict access +- TLS encryption for secure connections +- Connection pooling for better performance +::: + +## Stop the Redis Replication Cluster + +Stopping a cluster temporarily suspends operations while preserving all data and configuration: + +**Key Effects:** +- Compute resources (Pods) are released +- Persistent storage (PVCs) remains intact +- Service definitions are maintained +- Cluster configuration is preserved +- Operational costs are reduced + + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/redis/stop.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-stop + namespace: demo + spec: + clusterName: redis-replication + type: Stop + ``` + + + + Alternatively, stop by setting `spec.componentSpecs.stop` to true: + + ```bash + kubectl patch cluster redis-replication -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/1/stop", + "value": true + } + ]' + ``` + + ```yaml + spec: + componentSpecs: + - name: redis + stop: true # Set to stop component + replicas: 2 + ``` + + + +## Start the Redis Replication Cluster + +Restarting a stopped cluster resumes operations with all data and configuration intact. 
+ +**Key Effects:** +- Compute resources (Pods) are recreated +- Services become available again +- Cluster returns to previous state + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/redis/start.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-start + namespace: demo + spec: + clusterName: redis-replication + type: Start + ``` + + + + Restart by setting `spec.componentSpecs.stop` to false: + + ```bash + kubectl patch cluster redis-replication -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/1/stop" + } + ]' + ``` + + + + + +## Delete Redis Replication Cluster + +Choose carefully based on your data retention needs: + +| Policy | Resources Removed | Data Removed | Recommended For | +|-----------------|-------------------|--------------|-----------------| +| DoNotTerminate | None | None | Critical production clusters | +| Delete | All resources | PVCs deleted | Non-critical environments | +| WipeOut | All resources | Everything* | Test environments only | + +*Includes snapshots and backups in external storage + +**Pre-Deletion Checklist:** +1. Verify no applications are using the cluster +2. Ensure required backups exist +3. Confirm proper terminationPolicy is set +4. Check for dependent resources + +For test environments, use this complete cleanup: + +```bash +kubectl patch cluster redis-replication -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster redis-replication -n demo +``` + +## Why Redis Sentinel starts before Redis + +Redis Sentinel is a high availability solution for Redis. It provides monitoring, notifications, and automatic failover for Redis instances. 
+ +Each Redis replica, from the Redis component, upon startup, will connect to the Redis Sentinel instances to get the current leader and follower information. It needs to determine: + +- Whether it should act as the primary (master) node. +- If not, which node is the current primary to replicate from. + +In more detail, each Redis replica will: + +1. Check for Existing Primary Node + - Queries Redis Sentinel to find out if a primary node is already elected. + - Retrieve the primary's address and port. +1. Initialize as Primary if Necessary + - If no primary is found (e.g., during initial cluster setup), it configures the current Redis instance to become the primary. + - Updates Redis configuration to disable replication. +1. Configure as Replica if Primary Exists + - If a primary is found, it sets up the current Redis instance as a replica. + - Updates the Redis configuration with the `replicaof` directive pointing to the primary's address and port. + - Initiates replication to synchronize data from the primary. + +KubeBlocks ensures that Redis Sentinel starts first to provide the necessary information for the Redis replicas to initialize correctly. Such dependency is well-expressed in the KubeBlocks CRD `ClusterDefinition` ensuring the correct startup order. + +More details on how components for the `replication` topology are started, upgraded can be found in: + +```bash +kubectl get cd redis -oyaml | yq '.spec.topologies[] | select(.name=="replication") | .orders' +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/01-standlone.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/01-standlone.mdx new file mode 100644 index 00000000..31d15a6a --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/01-standlone.mdx @@ -0,0 +1,93 @@ +--- +title: Deploying a Redis Standalone Cluster with KubeBlocks +description: Learn how to deploy a Redis Standalone cluster using KubeBlocks. 
This guide covers configuration, verification, failover testing, and timeout configuration. +keywords: [KubeBlocks, Redis, Kubernetes, High Availability] +sidebar_position: 1 +sidebar_label: Redis Standalone Cluster +--- + +# Deploying a Redis Standalone Cluster with KubeBlocks + +A standalone Redis deployment consists of a single Redis server instance running independently without any replication or clustering. It is the simplest and most lightweight deployment model. + +**Use Cases** +- Development & testing environments. +- Small applications with low traffic. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploying the Redis Standalone Cluster + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-standalone + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: redis # set to reids + topology: standalone # set topology to standalone + componentSpecs: + - name: redis + replicas: 1 # set replica to 1 + serviceVersion: 7.2.4 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**Key Configuration Details**: +- `clusterDef: redis`: Specifies the ClusterDefinition CR for the cluster. +- `topology: standalone`: Configures the cluster to use standalone topology. +- `componentSpecs`: Defines the components in the cluster: + - Component 'redis': + - `serviceVersion: 7.2.4`: Specifies the version of the Redis service to be deployed. 
+ + +## Verifying the Deployment + +### Check the Cluster Status +Once the cluster is deployed, check its status: +```bash +kubectl get cluster redis-standalone -n demo -w +``` +Expected Output: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +redis-standalone redis Delete Running 34s +``` + +### Verify Component Status +```bash +kubectl get component redis-standalone-redis -n demo +``` +Expected Output: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +redis-standalone-redis redis-7-1.0.0 7.2.4 Running 90s +``` + +## Cleanup +To remove all resources created during this tutorial: + +```bash +kubectl delete cluster redis-standalone -n demo +kubectl delete ns demo +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/02-replication.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/02-replication.mdx new file mode 100644 index 00000000..09f3be8b --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/02-replication.mdx @@ -0,0 +1,130 @@ +--- +title: Deploying a Redis Replication Cluster with KubeBlocks +description: Learn how to deploy a Redis Replication cluster using KubeBlocks. This guide covers configuration, verification, failover testing, and timeout configuration. +keywords: [KubeBlocks, Redis, Kubernetes, High Availability] +sidebar_position: 1 +sidebar_label: Redis Replication Cluster +--- + +# Deploying a Redis Replication Cluster with KubeBlocks + +Redis Replication involves a primary (master) node that handles writes and one or more replica (slave) nodes that replicate data from the master for read scaling and failover. + +**Use Cases** +- Read-heavy applications (e.g., analytics workload). +- High-availability setups with Redis Sentinel for automatic failover. 
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploying the Redis Replication Cluster + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-replication + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**Key Configuration Details**: +- `clusterDef: redis`: Specifies the ClusterDefinition CR for the cluster. +- `topology: replication`: Configures the cluster to use replication topology. +- `componentSpecs`: Defines the components in the cluster: + - Component 'redis': + - `serviceVersion: 7.2.4`: Specifies the version of the Redis service to be deployed. + - Component 'redis-sentinel': + - Redis Sentinel is a high availability solution for Redis. Recommended to deploy 3 replica for high availability. 
+ +## Verifying the Deployment + +### Check the Cluster Status +Once the cluster is deployed, check its status: +```bash +kubectl get cluster redis-replication -n demo -w +``` +Expected Output: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +redis-replication redis Delete Running 66s +``` + +### Verify Component and Pod Status +```bash +kubectl get component redis-replication-redis -n demo +``` +Expected Output: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +redis-replication-redis redis-7-1.0.0 7.2.4 Running 90s +``` + +Check pods and their roles + +```bash +kubectl get pods -l app.kubernetes.io/instance=redis-replication -L kubeblocks.io/role -n demo +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE ROLE +redis-replication-redis-0 3/3 Running 0 3m38s primary +redis-replication-redis-1 3/3 Running 0 3m16s secondary +redis-replication-redis-sentinel-0 2/2 Running 0 4m35s +redis-replication-redis-sentinel-1 2/2 Running 0 4m17s +redis-replication-redis-sentinel-2 2/2 Running 0 3m59s +``` + +## Cleanup +To remove all resources created during this tutorial: + +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/03-sharding.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/03-sharding.mdx new file mode 100644 index 00000000..2885bc8f --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/03-sharding.mdx @@ -0,0 +1,236 @@ +--- +title: Deploying a Redis Sharding Cluster with KubeBlocks +description: Learn how to deploy a Redis Sharding cluster using KubeBlocks. This guide covers configuration, verification, failover testing, and timeout configuration. 
+keywords: [KubeBlocks, Redis, Kubernetes, High Availability] +sidebar_position: 1 +sidebar_label: Redis Sharding Cluster +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Deploying a Redis Sharding Cluster (Cluster Mode) with KubeBlocks + +Redis Cluster distributes data across multiple nodes (shards) using hash-based partitioning, allowing horizontal scaling for both reads and writes. + +**Use Cases** +- Large-scale applications requiring high throughput. +- Distributed caching and session storage. +- Write-heavy workloads (e.g., real-time analytics). + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploying the Redis Sharding Cluster + +To create a redis sharding cluster (cluster mode) with 3 shards, and 2 replica for each shard: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-sharding + namespace: demo +spec: + terminationPolicy: Delete + shardings: + - name: shard # the name of the shard + shards: 3 # the number of shards to create for the cluster + template: + name: redis + componentDef: redis-cluster-7 # the name of the component definition for each shard + replicas: 2 # replicas is the number of replicas to create for each shard + resources: + limits: + cpu: '1' + memory: 1Gi + requests: + cpu: '1' + memory: 1Gi + serviceVersion: 7.2.4 + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + services: + # The service `redis-advertised` is defined in `ComponentDefinition` + # and it is used to parse the advertised endpoints of the Redis pods. + - name: redis-advertised # This is a per-pod svc, and will be used to parse advertised endpoints + podService: true + # - NodePort + # - LoadBalancer + serviceType: NodePort +``` + +**Key Configuration Details**: +- `shardings`: Specifies a list of ShardingSpec objects that configure the sharding topology for components of a Cluster. 
+- `shards`: Specifies the number of shards to create for the cluster. +- `serviceType`: Specifies the service type of `redis-advertised` service, which is used to parse the advertised endpoints of the Redis pods. + By default, the service type is `NodePort`. If you want to expose the service to the outside of the cluster, you can override the service type to `NodePort` or `LoadBalancer` depending on your need. + +:::tip + +A Redis cluster needs a minimum of **three** master nodes to ensure high availability and prevent data inconsistency. + +A production-ready Redis Cluster is typically recommended to have at least six nodes: three masters for sharding and failover consensus, and three replicas to act as backups for each master. + +When creating or scaling-in redis clusters, make sure the `shards` is greater than or equal to **3**. +::: + +## Verifying the Deployment + +### Check the Cluster Status +Once the cluster is deployed, check its status: +```bash +kubectl get cluster redis-sharding -n demo -w +``` +Expected Output: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +redis-sharding Delete Running 103s +``` + +### Verify Component and Pod Status + +Get all components working for this cluster: +```bash +kubectl get cmp -l app.kubernetes.io/instance=redis-sharding -n demo +``` + +Expected Output: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +redis-sharding-shard-5cd redis-cluster-7-1.0.0 7.2.4 Running 2m34s +redis-sharding-shard-drg redis-cluster-7-1.0.0 7.2.4 Running 2m34s +redis-sharding-shard-tgf redis-cluster-7-1.0.0 7.2.4 Running 2m34s +``` +Each component stands for a shard, with hash id as suffix. 
+ +Check pods and their roles + +```bash +kubectl get pods -l app.kubernetes.io/instance=redis-sharding -L kubeblocks.io/role -n demo +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE ROLE +redis-sharding-shard-5cd-0 2/2 Running 0 3m55s primary +redis-sharding-shard-5cd-1 2/2 Running 0 3m35s secondary +redis-sharding-shard-drg-0 2/2 Running 0 3m53s primary +redis-sharding-shard-drg-1 2/2 Running 0 3m35s secondary +redis-sharding-shard-tgf-0 2/2 Running 0 3m54s primary +redis-sharding-shard-tgf-1 2/2 Running 0 3m36s secondary +``` +There are in-total six replicas in the cluster, two (one primarily and one secondary) for each component. + +## Scaling Shards + +### Scaling-Out Shards (Add Shards) +**Expected Workflow**: + +1. A new Component is provisioned with two replicas, one primary and one secondary. +2. Cluster status changes from `Updating` to `Running` when all the components are ready (status is `Running`). + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + To increase the number of shards to `4`, you can use the following OpsRequest: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-sharding-scale-out-ops + namespace: demo + spec: + clusterName: redis-sharding + type: HorizontalScaling + horizontalScaling: + - componentName: shard + shards: 4 + ``` + + Monitor the progress of the scaling operation: + + ```bash + kubectl get ops redis-sharding-scale-out-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-sharding-scale-out-ops HorizontalScaling redis-sharding Running 0/1 35s + redis-sharding-scale-out-ops HorizontalScaling redis-sharding Succeed 1/1 2m35s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `shards` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: shard + shards: 4 + # remaining 
fields are the same as the original cluster CR, omitted for brevity
+ +::: + +## Cleanup +To remove all resources created during this tutorial: + +```bash +kubectl delete cluster redis-sharding -n demo +kubectl delete ns demo +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/_category_.yml new file mode 100644 index 00000000..f041cfad --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/03-topologies/_category_.yml @@ -0,0 +1,4 @@ +position: 3 +label: Topologies +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/01-stop-start-restart.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..bc0ae33c --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,312 @@ +--- +title: Redis Replication Cluster Lifecycle Management (Stop, Start, Restart) +description: Learn how to manage Redis Replication Cluster states in KubeBlocks including stopping, starting, and restarting operations to optimize resources. +keywords: [KubeBlocks, Redis, Cluster Management, Stop, Start, Restart] +sidebar_position: 1 +sidebar_label: Lifecycle Management +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Redis Replication Cluster Lifecycle Management + +This guide demonstrates how to manage a Redis Replication Cluster's operational state in **KubeBlocks**, including: + +- Stopping the cluster to conserve resources +- Starting a stopped cluster +- Restarting cluster components + +These operations help optimize resource usage and reduce operational costs in Kubernetes environments. 
+ +Lifecycle management operations in KubeBlocks: + +| Operation | Effect | Use Case | +|-----------|--------|----------| +| Stop | Suspends cluster, retains storage | Cost savings, maintenance | +| Start | Resumes cluster operation | Restore service after pause | +| Restart | Recreates pods for component | Configuration changes, troubleshooting | + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Redis Replication Cluster + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## Cluster Lifecycle Operations + +### Stopping the Cluster + +Stopping a Redis Replication Cluster in KubeBlocks will: + +1. Terminates all running pods +2. Retains persistent storage (PVCs) +3. Maintains cluster configuration + +This operation is ideal for: +- Temporary cost savings +- Maintenance windows +- Development environment pauses + + + + + +Option 1: OpsRequest API + +Create a Stop operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-replication-stop-ops + namespace: demo +spec: + clusterName: redis-replication + type: Stop +``` + + + + +Option 2: Cluster API Patch + +Modify the cluster spec directly by patching the stop field: + +```bash +kubectl patch cluster redis-replication -n demo --type='json' -p='[ +{ + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true +}, +{ + "op": "add", + "path": "/spec/componentSpecs/1/stop", + "value": true +} +]' +``` + + + + + +### Verifying Cluster Stop + +To confirm a successful stop operation: + +1. 
Check cluster status transition: + ```bash + kubectl get cluster redis-replication -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + redis-replication redis Delete Stopping 6m3s + redis-replication redis Delete Stopped 6m55s + ``` + +2. Verify no running pods: + ```bash + kubectl get pods -n demo + ``` + Example Output: + ```bash + No resources found in demo namespace. + ``` + +3. Confirm persistent volumes remain: + ```bash + kubectl get pvc -n demo + ``` + Example Output: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES + data-redis-replication-redis-0 Bound pvc-uuid 20Gi RWO + data-redis-replication-redis-1 Bound pvc-uuid 20Gi RWO + data-redis-replication-redis-sentinel-0 Bound pvc-uuid 20Gi RWO + data-redis-replication-redis-sentinel-1 Bound pvc-uuid 20Gi RWO + data-redis-replication-redis-sentinel-2 Bound pvc-uuid 20Gi RWO + ``` + +### Starting the Cluster + +Starting a stopped Redis Replication Cluster: +1. Recreates all pods +2. Reattaches persistent storage +3. Restores service endpoints + +Expected behavior: +- Cluster returns to previous state +- No data loss occurs +- Services resume automatically + + + + +Initiate a Start operation request: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-replication-start-ops + namespace: demo +spec: + # Specifies the name of the Cluster resource that this operation is targeting. + clusterName: redis-replication + type: Start +``` + + + + + +Modify the cluster spec to resume operation: +1. Set stop: false, or +2. Remove the stop field entirely + + ```bash + kubectl patch cluster redis-replication -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/1/stop" + } + ]' + ``` + + + + + +### Verifying Cluster Start + +To confirm a successful start operation: + +1. 
Check cluster status transition: + ```bash + kubectl get cluster redis-replication -n demo -w + ``` + Example Output: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + redis-replication redis Delete Updating 22m + redis-replication redis Delete Running 22m + ``` + +2. Verify pod recreation: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication + ``` + Example Output: + ```bash + NAME READY STATUS RESTARTS AGE + redis-replication-redis-0 1/1 Running 0 2m + redis-replication-redis-1 1/1 Running 0 1m + ``` + +3. Check service endpoints: + ```bash + kubectl get endpoints redis-replication-redis-redis -n demo + ``` + +### Restarting Cluster + +Restart operations provide: +- Pod recreation without full cluster stop +- Component-level granularity +- Minimal service disruption + +Use cases: +- Configuration changes requiring restart +- Resource refresh +- Troubleshooting + +**Using OpsRequest API** + +Target specific components `redis` and `redis-sentinel` for restart: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-replication-restart-ops + namespace: demo +spec: + clusterName: redis-replication + type: Restart + restart: + - componentName: redis + - componentName: redis-sentinel +``` + +**Verifying Restart Completion** + +To verify a successful component restart: + +1. Track OpsRequest progress: + ```bash + kubectl get opsrequest redis-replication-restart-ops -n demo -w + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-restart-ops Restart redis-replication Running 0/2 10s + redis-replication-restart-ops Restart redis-replication Running 1/2 65s + redis-replication-restart-ops Restart redis-replication Running 2/2 2m5s + redis-replication-restart-ops Restart redis-replication Succeed 2/2 2m5s + ``` + +2. 
Check pod status: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication + ``` + Note: Pods will show new creation timestamps after restart + +3. Verify component health: + ```bash + kbcli cluster describe redis-replication -n demo + ``` + +Once the operation is complete, the cluster will return to the Running state. + + +To restart pods for Redis Component only, you can use: +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-replication-restart-redis + namespace: demo +spec: + clusterName: redis-replication + type: Restart + restart: + - componentName: redis +``` + +## Summary +In this guide, you learned how to: +1. Stop a Redis Replication Cluster to suspend operations while retaining persistent storage. +2. Start a stopped cluster to bring it back online. +3. Restart specific cluster components to recreate their Pods without stopping the entire cluster. + +By managing the lifecycle of your Redis Replication Cluster, you can optimize resource utilization, reduce costs, and maintain flexibility in your Kubernetes environment. KubeBlocks provides a seamless way to perform these operations, ensuring high availability and minimal disruption. diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/02-vertical-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..f1fd30e6 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,178 @@ +--- +title: Vertical Scaling in a Redis Replication Cluster +description: Learn how to perform vertical scaling in a Redis Replication Cluster managed by KubeBlocks to optimize resource utilization and improve performance. 
+keywords: [KubeBlocks, Redis, Vertical Scaling, Kubernetes, Resources] +sidebar_position: 2 +sidebar_label: Vertical Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Vertical Scaling for Redis Replication Clusters with KubeBlocks + +This guide demonstrates how to vertically scale a Redis Replication Cluster managed by KubeBlocks by adjusting compute resources (CPU and memory) while maintaining the same number of replicas. + +Vertical scaling modifies compute resources (CPU and memory) for Redis instances while maintaining replica count. Key characteristics: + +- **Non-disruptive**: When properly configured, maintains availability during scaling +- **Granular**: Adjust CPU, memory, or both independently +- **Reversible**: Scale up or down as needed + +KubeBlocks orchestrates scaling with minimal impact: +1. Secondary replicas update first +2. Primary updates last after secondaries are healthy +3. Cluster status transitions from `Updating` to `Running` + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Redis Replication Cluster + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## Vertical Scale + +**Expected Workflow**: + +1. Secondary replicas are updated first (one at a time) +1. Primary is updated last after secondary replicas are healthy +1. 
Cluster status transitions from `Updating` to `Running` + + + + Option 1: Using VerticalScaling OpsRequest + + Apply the following YAML to scale up the resources for the redis component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-vscale-ops + namespace: demo + spec: + clusterName: redis-replication + type: VerticalScaling + verticalScaling: + - componentName: redis + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + What Happens During Vertical Scaling? + - Secondary Pods are recreated first to ensure the primary Pod remains available. + - Once all secondary Pods are updated, the primary Pod is restarted with the new resource configuration. + + + You can check the progress of the scaling operation with the following command: + + ```bash + kubectl -n demo get ops redis-replication-vscale-ops -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-vscale-ops VerticalScaling redis-replication Running 0/2 11s + redis-replication-vscale-ops VerticalScaling redis-replication Running 1/2 36s + redis-replication-vscale-ops VerticalScaling redis-replication Running 2/2 52s + redis-replication-vscale-ops VerticalScaling redis-replication Running 2/2 52s + redis-replication-vscale-ops VerticalScaling redis-replication Succeed 2/2 52s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update `spec.componentSpecs.resources` field to the desired resources for vertical scale. + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + resources: + requests: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + limits: + cpu: "1" # Update the resources to your need. + memory: "1Gi" # Update the resources to your need. + ... 
+ ``` + + + +## Best Practices & Considerations + +**Planning:** +- Scale during maintenance windows or low-traffic periods +- Verify Kubernetes cluster has sufficient resources +- Check for any ongoing operations before starting + +**Execution:** +- Maintain balanced CPU-to-Memory ratios +- Set identical requests/limits for guaranteed QoS + +**Post-Scaling:** +- Monitor resource utilization and application performance +- Consider adjusting Redis parameters if needed + +## Verification +Verify the updated resources by inspecting the cluster configuration or Pod details: +```bash +kbcli cluster describe redis-replication -n demo +``` + +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +redis 1 / 1 1Gi / 1Gi data:20Gi +redis-sentinel 500m / 500m 512Mi / 512Mi data:20Gi +``` + +Only resources for Redis component have been updated, but those for redis-sentinel remain the same. + +## Key Benefits of Vertical Scaling with KubeBlocks +- Seamless Scaling: Pods are recreated in a specific order to ensure minimal disruption. +- Dynamic Resource Adjustments: Easily scale CPU and memory based on workload requirements. +- Flexibility: Choose between OpsRequest for dynamic scaling or direct API updates for precise control. +- Improved Availability: The cluster remains operational during the scaling process, maintaining high availability. + +## Cleanup +To remove all created resources, delete the Redis Replication Cluster along with its namespace: +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you learned how to: +1. Deploy a Redis Replication Cluster managed by KubeBlocks. +2. Perform vertical scaling by increasing or decreasing resources for the redis component. +3. Use both OpsRequest and direct Cluster API updates to adjust resource allocations. 
+ +Vertical scaling is a powerful tool for optimizing resource utilization and adapting to changing workload demands, ensuring your Redis Replication Cluster remains performant and resilient. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/03-horizontal-scaling.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..8a579536 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,279 @@ +--- +title: Horizontal Scaling of Redis Clusters with KubeBlocks +description: Learn how to perform horizontal scaling (scale-out and scale-in) on a Redis cluster managed by KubeBlocks using OpsRequest and direct Cluster API updates. +keywords: [KubeBlocks, Redis, Horizontal Scaling, Scale-Out, Scale-In, Kubernetes] +sidebar_position: 3 +sidebar_label: Horizontal Scaling +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Horizontal Scaling for Redis Clusters with KubeBlocks + +This guide explains how to perform horizontal scaling (scale-out and scale-in) on a Redis cluster managed by KubeBlocks. You'll learn how to use both **OpsRequest** and direct **Cluster API** updates to achieve this. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Redis Replication Cluster + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + + +## Scale-out (Add Replicas) + +**Expected Workflow**: + +1. New pod is provisioned, and transitions from `Pending` to `Running` with `secondary` role +2. Data synced from primary to new replica +3. 
Cluster status changes from `Updating` to `Running` + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale out the Redis cluster by adding 1 replica to redis component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-scale-out-ops + namespace: demo + spec: + clusterName: redis-replication + type: HorizontalScaling + horizontalScaling: + - componentName: redis + # Specifies the replica changes for scaling in components + scaleOut: + # Specifies the replica changes for the component. + # add one more replica to current component + replicaChanges: 1 + ``` + + Monitor the progress of the scaling operation: + + ```bash + kubectl get ops redis-replication-scale-out-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-scale-out-ops HorizontalScaling redis-replication Running 0/1 9s + redis-replication-scale-out-ops HorizontalScaling redis-replication Running 1/1 20s + redis-replication-scale-out-ops HorizontalScaling redis-replication Succeed 1/1 20s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 3 # increase replicas to scale-out + ... + ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster redis-replication -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 3}]' + ``` + + + +### Verify Scale-Out + +After applying the operation, you will see a new pod created and the Redis cluster status goes from `Updating` to `Running`, and the newly created pod has a new role `secondary`. 
+ +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication +``` + +Example Output (3 Pods): +```bash +NAME READY STATUS RESTARTS AGE +redis-replication-redis-0 3/3 Running 0 9m47s +redis-replication-redis-1 3/3 Running 0 10m +redis-replication-redis-2 3/3 Running 0 4m48s +redis-replication-redis-sentinel-0 2/2 Running 0 16m +redis-replication-redis-sentinel-1 2/2 Running 0 16m +redis-replication-redis-sentinel-2 2/2 Running 0 17m +``` + +New replicas automatically join as secondary nodes. +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication -L kubeblocks.io/role +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE ROLE +redis-replication-redis-0 3/3 Running 0 10m secondary +redis-replication-redis-1 3/3 Running 0 11m primary +redis-replication-redis-2 3/3 Running 0 5m27s secondary +redis-replication-redis-sentinel-0 2/2 Running 0 17m +redis-replication-redis-sentinel-1 2/2 Running 0 17m +redis-replication-redis-sentinel-2 2/2 Running 0 17m +``` + +## Scale-in (Remove Replicas) + +**Expected Workflow**: + +1. Selected replica (the one with the largest ordinal) is removed +2. If removing a primary replica, automatic switchover occurs first +3. Pod is terminated gracefully +4. Cluster status changes from `Updating` to `Running` + +:::note +If the replica being scaled-in happens to be a primary replica, KubeBlocks will trigger a Switchover actions. And this pod will not be terminated until this Switchover action succeeds. 
+::: + + + + + + Option 1: Using Horizontal Scaling OpsRequest + + Scale in the Redis cluster by removing ONE replica: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-scale-in-ops + namespace: demo + spec: + clusterName: redis-replication + type: HorizontalScaling + horizontalScaling: + - componentName: redis + # Specifies the replica changes for scaling in components + scaleIn: + # Specifies the replica changes for the component. + # remove one replica from current component + replicaChanges: 1 + ``` + + Monitor progress: + ```bash + kubectl get ops redis-replication-scale-in-ops -n demo -w + ``` + + Expected Result: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-scale-in-ops HorizontalScaling redis-replication Running 0/1 8s + redis-replication-scale-in-ops HorizontalScaling redis-replication Running 1/1 24s + redis-replication-scale-in-ops HorizontalScaling redis-replication Succeed 1/1 24s + ``` + + + + + Option 2: Direct Cluster API Update + + Alternatively, you can perform a direct update to the `replicas` field in the Cluster resource: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 1 # decrease replicas to scale-in + ``` + + Or you can patch the cluster CR with command: + + ```bash + kubectl patch cluster redis-replication -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 1}]' + ``` + + + + +### Verify Scale-In + +Example Output (ONE Pod): +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication,apps.kubeblocks.io/component-name=redis +NAME READY STATUS RESTARTS AGE +redis-replication-redis-0 3/3 Running 0 16m +``` + +## Troubleshooting +If the scale-in operation gets stuck for quite a long time, please check these resources: + +```bash +# Check agent logs on both current 
primary and candidate +kubectl logs -n demo -c kbagent +kubectl logs -n demo -c kbagent + +# Check cluster events for errors +kubectl get events -n demo --field-selector involvedObject.name=redis-replication + +# Check kubeblocks logs +kubectl -n kb-system logs deploy/kubeblocks +``` + +If you get errors like the following from the primary replica: +```text +INFO Action Executed {"action": "switchover", "result": "exit code: 1: failed"} +INFO HTTP API Called {"user-agent": "Go-http-client/1.1", "method": "POST", "path": "/v1.0/action", "status code": 200, "cost": 7} +``` + +It could be a switchover error, and please check KubeBlocks logs for more details. + +## Best Practices + +When performing horizontal scaling: +- Scale during low-traffic periods when possible +- Monitor cluster health during scaling operations +- Verify sufficient resources exist before scaling out +- Consider storage requirements for new replicas + +## Cleanup +To remove all created resources, delete the Redis cluster along with its namespace: +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` + +## Summary +In this guide you learned how to: +- Perform scale-out operations to add replicas to a Redis cluster. +- Perform scale-in operations to remove replicas from a Redis cluster. +- Use both OpsRequest and direct Cluster API updates for horizontal scaling. + +KubeBlocks ensures seamless scaling with minimal disruption to your database operations. 
diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/04-volume-expansion.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/04-volume-expansion.mdx new file mode 100644 index 00000000..3a2e1049 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/04-volume-expansion.mdx @@ -0,0 +1,237 @@ +--- +title: Expanding Volume in a Redis Cluster +description: Learn how to expand Persistent Volume Claims (PVCs) in a Redis cluster managed by KubeBlocks without downtime. +keywords: [KubeBlocks, Redis, Volume Expansion, Kubernetes, PVC] +sidebar_position: 4 +sidebar_label: Volume Expansion +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Expanding Volume in a Redis Cluster + +This guide explains how to expand Persistent Volume Claims (PVCs) in a Redis cluster managed by **KubeBlocks**. Volume expansion enables dynamic storage capacity increases, allowing your database to scale seamlessly as data grows. When supported by the underlying storage class, this operation can be performed without downtime. + +Volume expansion allows you to increase the size of a Persistent Volume Claim (PVC) after it has been created. This feature was introduced in Kubernetes v1.11 and became generally available (GA) in Kubernetes v1.24. 
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +### Check the Storage Class for Volume Expansion Support + +List all available storage classes and verify if volume expansion is supported by checking the `ALLOWVOLUMEEXPANSION` field: +```bash +kubectl get storageclass +``` + +Example Output: +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +Ensure the storage class you are using has `ALLOWVOLUMEEXPANSION` set to true. If it is false, the storage class does not support volume expansion. + +## Deploy a Redis Replication Cluster with StorageClass + +KubeBlocks uses a declarative approach to manage Redis clusters. Below is an example configuration for deploying a Redis cluster with 2 replicas (1 primary, 1 secondary). + +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-replication + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + # specify storage class name supports Volume Expansion + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + # specify storage class name supports Volume Expansion + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**Explanation 
of Key Fields** +- `storageClassName`: Specifies `StorageClass` name that supports volume expansion. If not set, the StorageClass annotated `default` will be used. + +:::note +**ALLOWVOLUMEEXPANSION** + +Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`) when creating cluster. + +::: + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## Expand volume + +:::note +1. Ensure the storage class supports volume expansion (check `ALLOWVOLUMEEXPANSION`). +2. The new size must be larger than the current size. +3. Volume expansion may require additional configurations depending on the storage provider. +::: + +You can expand the volume in one of two ways: + + + + + Option 1: Using VolumeExpansion OpsRequest + + Apply the following YAML to increase the volume size for the redis component: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-expand-volume-ops + namespace: demo + spec: + clusterName: redis-replication + type: VolumeExpansion + volumeExpansion: + - componentName: redis + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + Monitor the expansion progress with: + + ```bash + kubectl describe ops redis-replication-expand-volume-ops -n demo + ``` + + Expected Result: + ```bash + Status: + Phase: Succeed + ``` + Once completed, the PVC size will be updated. + + :::note + If the storage class you use does not support volume expansion, this OpsRequest fails fast with information like: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + Option 2: Direct Cluster API Update + + Alternatively, you may update the `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` field to the desired size. 
+ + ```yaml + componentSpecs: + - name: redis + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # specify new size, and make sure it is larger than current size + storage: 30Gi + ``` + KubeBlocks will automatically update the PVC size based on the new specifications. + + + +## Verification + +Verify the updated cluster configuration: +```bash +kbcli cluster describe redis-replication -n demo +``` +Expected Output: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +redis 500m / 500m 512Mi / 512Mi data:30Gi +``` +The volume size for the data PVC has been updated to the specified value (e.g., 30Gi in this case). + +Confirm PVC resizing completion: +```bash +kubectl get pvc -l app.kubernetes.io/instance=redis-replication -n demo +``` +Expected Output: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +redis-replication-redis-data-0 Bound pvc-xxxxxxxx 30Gi RWO 33m +redis-replication-redis-data-1 Bound pvc-xxxxxxxx 30Gi RWO 33m +``` + +## Cleanup +To remove all created resources, delete the Redis cluster along with its namespace: +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` + +## Summary + +In this guide you learned how to: +1. Verify storage class compatibility for volume expansion. +2. Perform volume expansion using either: + - OpsRequest for dynamic updates. + - Cluster API for manual updates. +3. Verify the updated PVC size and ensure the resize operation is complete. + +With volume expansion, you can efficiently scale your Redis cluster's storage capacity without service interruptions, ensuring your database can grow alongside your application needs. 
+ + diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/05-manage-loadbalancer.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..ba111c0f --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,330 @@ +--- +title: Create and Destroy Redis Service Using the Declarative Cluster API in KubeBlocks +description: Learn how to configure and manage Redis services in KubeBlocks for external and internal access using LoadBalancer and other service types. +keywords: [KubeBlocks, Redis, LoadBalancer, External Service, Expose, Kubernetes] +sidebar_position: 5 +sidebar_label: Manage Redis Services +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Manage Redis Services Using the Declarative Cluster API in KubeBlocks + +This guide provides step-by-step instructions for exposing Redis services managed by KubeBlocks, both externally and internally. You'll learn to configure external access using cloud provider LoadBalancer services, manage internal services, and properly disable external exposure when no longer needed. 
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Redis Replication Cluster + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## View Network Services +List the Services created for the Redis cluster: +```bash +kubectl get service -l app.kubernetes.io/instance=redis-replication -n demo +``` + +Example Services: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +redis-replication-redis-redis ClusterIP 10.96.102.140 6379/TCP 31s +redis-replication-redis-sentinel-redis-sentinel ClusterIP 10.96.157.4 26379/TCP 51s +``` + +## Expose Redis Service + +External service addresses enable public internet access to Redis, while internal service addresses restrict access to the user's VPC. + +### Service Types Comparison + +| Type | Use Case | Cloud Cost | Security | +|------|----------|------------|----------| +| ClusterIP | Internal service communication | Free | Highest | +| NodePort | Development/testing | Low | Moderate | +| LoadBalancer | Production external access | High | Managed via security groups | + + + + + + + Option 1: Using OpsRequest + + To expose the Redis service externally using a LoadBalancer, create an OpsRequest resource: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: redis-replication + expose: + - componentName: redis + services: + - name: internet + # Determines how the Service is exposed. Defaults to 'ClusterIP'. + # Valid options are 'ClusterIP', 'NodePort', and 'LoadBalancer'. + serviceType: LoadBalancer + # Contains cloud provider related parameters if ServiceType is LoadBalancer. 
+ # Following is an example for AWS EKS + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + # Specifies a role to target with the service. + # If specified, the service will only be exposed to pods with the matching + # role. + roleSelector: primary + switch: Enable + ``` + + Wait for the OpsRequest to complete: + ```bash + kubectl get ops redis-replication-expose-enable-ops -n demo + ``` + + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-expose-enable-ops Expose redis-replication Succeed 1/1 31s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the `spec.services` section in the Cluster resource to include a LoadBalancer service: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: redis-replication + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + # expose a external service + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # or "true" for an internal VPC IP + componentSelector: redis + name: redis-internet + serviceName: redis-internet + roleSelector: primary + spec: + ipFamilyPolicy: PreferDualStack + ports: + - name: redis + port: 6379 + protocol: TCP + targetPort: redis + type: LoadBalancer + componentSpecs: + ... + ``` + The YAML configuration above adds a new external service under the services section. This LoadBalancer service includes annotations for AWS Network Load Balancer (NLB). + + :::note + Cloud Provider Annotations + + When using a LoadBalancer service, you must include the appropriate annotations specific to your cloud provider. 
Below is a list of commonly used annotations for different cloud providers: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # Use "false" for internet-facing LoadBalancer + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # Restricts the LoadBalancer to internal VPC access only. Defaults to internet-facing if not specified. + cloud.google.com/l4-rbs: "enabled" # Optimization for internet-facing LoadBalancer + ``` + + - Alibaba Cloud + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # Use "intranet" for internal-facing LoadBalancer + ``` + ::: + + + :::note + The `service.beta.kubernetes.io/aws-load-balancer-internal` annotation controls whether the LoadBalancer is internal or internet-facing. Note that this annotation cannot be modified dynamically after service creation. + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # Use "true" for internal VPC IPs + ``` + If you change this annotation from "false" to "true" after the Service is created, the annotation may update in the Service object, but the LoadBalancer will still retain its public IP. + + To properly modify this behavior: + - First, delete the existing LoadBalancer service. + - Recreate the service with the updated annotation (`service.beta.kubernetes.io/aws-load-balancer-internal`: "true"). + - Wait for the new LoadBalancer to be provisioned with the correct internal or external IP. 
+ ::: + + + Wait for the Cluster status to transition to Running using the following command: + ```bash + kubectl get cluster redis-replication -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + redis-replication redis Delete Running 18m + ``` + + + + +### Verify the Exposed Service +Check the service details to confirm the LoadBalancer service is created: + +```bash +kubectl get service -l app.kubernetes.io/instance=redis-replication -n demo +``` + +Example Output: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +redis-replication-redis-internet LoadBalancer 172.20.60.24 6379:31243/TCP 1m +``` + +### Wait for DNS Propagation + +The LoadBalancer DNS name may take 2-5 minutes to become resolvable. Verify the resolution status: + +```bash +nslookup # replace with the real IP from previous output. +``` + +## Connect to Redis Externally + +### Retrieve Credentials + +KubeBlocks automatically creates a Secret containing the Redis default credentials. Retrieve the Redis default credentials: +```bash +NAME=`kubectl get secrets -n demo redis-replication-redis-account-default -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo redis-replication-redis-account-default -o jsonpath='{.data.password}' | base64 -d` +``` + +### Connect Using Redis Client + +You can now connect to the Redis database externally (e.g., from your laptop or EC2): +```bash +redis-cli -h -a ${PASSWD} +``` + +## Disable External Exposure + + + + + + Option 1: Using OpsRequest + + To disable external access, create an OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-expose-disable-ops + namespace: demo + spec: + clusterName: redis-replication + expose: + - componentName: redis + services: + - name: internet + roleSelector: primary + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + Wait for the OpsRequest to 
complete: + ```bash + kubectl get ops redis-replication-expose-disable-ops -n demo + ``` + Example Output: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-expose-disable-ops Expose redis-replication Succeed 1/1 12s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, remove the `spec.services` field from the Cluster resource: + ```bash + kubectl patch cluster redis-replication -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + Monitor the cluster status until it is Running: + ```bash + kubectl get cluster redis-replication -n demo -w + ``` + + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + redis-replication redis Delete Running 23m + ``` + + + +### Verify Service Removal + +Ensure that the 'redis-replication-redis-internet' Service is removed: + +```bash +kubectl get service -l app.kubernetes.io/instance=redis-replication -n demo +``` + +Expected Result: The 'redis-replication-redis-internet' Service should be removed. + +## Cleanup +To remove all created resources, delete the Redis cluster along with its namespace: +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` + +## Summary +This guide demonstrated how to: +- Expose a Redis service externally or internally using KubeBlocks. +- Configure LoadBalancer services with cloud provider-specific annotations. +- Manage external access by enabling or disabling services via OpsRequest or direct updates to the Cluster API. + +KubeBlocks provides flexibility and simplicity for managing Redis services in Kubernetes environments. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/07-modify-parameters.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/07-modify-parameters.mdx new file mode 100644 index 00000000..1d0fd001 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/07-modify-parameters.mdx @@ -0,0 +1,126 @@ +--- +title: Modify Redis Parameters +description: Learn how to modify dynamic and static Redis parameters in KubeBlocks using Reconfiguring OpsRequest to optimize database performance and availability. +keywords: [Redis, KubeBlocks, OpsRequest, dynamic parameters, static parameters, database configuration] +sidebar_position: 7 +sidebar_label: Modify Redis Parameters +--- + +# Modify Redis Parameters + +Database reconfiguration involves modifying parameters, settings, or configurations to optimize performance, security, or availability. Parameter changes fall into two categories: + +| Type | Restart Required | Scope | Example Parameters | +|------|------------------|-------|--------------------| +| **Dynamic** | No | Immediate effect | `max_connections` | +| **Static** | Yes | After restart | `shared_buffers` | + +For static parameters, KubeBlocks minimizes downtime by: +1. Modifying and restarting replica nodes first +2. Performing a switchover to promote the updated replica as primary (typically completes in milliseconds) +3. Restarting the original primary node + +:::note + +KubeBlocks Redis Addon does not implement any dynamic reload action for `Dynamic Parameters`, thus changes on any parameters will cause a restart. + +::: + +This guide demonstrates how to modify static parameters of a Redis cluster managed by KubeBlocks using a Reconfiguring OpsRequest. 
+ +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Redis Cluster + +import CreatePGCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyPGCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## Check Parameter Values + +### Retrieve Credentials +KubeBlocks automatically creates a secret containing the Redis default credentials. Retrieve the credentials with the following commands: +```bash +NAME=`kubectl get secrets -n demo redis-replication-redis-account-default -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo redis-replication-redis-account-default -o jsonpath='{.data.password}' | base64 -d` +``` + +### Access Redis Cluster +To connect to the cluster's primary node, use the Redis client: +```bash +kubectl exec -it -n demo redis-replication-redis-0 -c redis -- redis-cli -a ${PASSWD} +``` + +### Query Parameter Values + +Once connected, you can query the current value of 'aof-timestamp-enabled': +```sql +127.0.0.1:6379> CONFIG GET aof-timestamp-enabled +1) "aof-timestamp-enabled" +2) "no" +``` + +## Static Parameter Example: Modifying aof-timestamp-enabled + +Create a Reconfigure OpsRequest. 
Apply the following OpsRequest YAML to set the 'aof-timestamp-enabled' to 'yes': + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-reconfigure-static + namespace: demo +spec: + clusterName: redis-replication + reconfigures: + - componentName: redis + parameters: + - key: aof-timestamp-enabled + value: 'yes' + type: Reconfiguring +``` + +Check the status of the OpsRequest until it completes: + +```bash +kubectl get ops redis-reconfigure-static -n demo -w +``` + +Example Output: +```bash +redis-reconfigure-static Reconfiguring redis-replication Running -/- 5s +redis-reconfigure-static Reconfiguring redis-replication Succeed -/- 33s +``` + +**Verify the Configuration Change** + +Log into the Redis instance and confirm that the `aof-timestamp-enabled` parameter has been updated: + +```sql +127.0.0.1:6379> CONFIG GET aof-timestamp-enabled +1) "aof-timestamp-enabled" +2) "yes" +``` + +## Cleanup +To remove all created resources, delete the Redis cluster along with its namespace: +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` + +## Summary +This guide covered modifying Redis parameters through KubeBlocks: +- Static changes require restart but with minimal downtime +- All changes are validated before application +- Configuration follows declarative management principles diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/08-switchover.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/08-switchover.mdx new file mode 100644 index 00000000..aae02aa1 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/08-switchover.mdx @@ -0,0 +1,179 @@ +--- +title: Redis Cluster Switchover +description: Perform planned role transitions in Redis clusters with KubeBlocks for minimal downtime and controlled maintenance +keywords: [Redis, KubeBlocks, Switchover, High Availability, Role Transition, Kubernetes] +sidebar_position: 8 +sidebar_label: Redis 
Switchover +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Redis Cluster Switchover + +A **switchover** is a planned operation that transfers the primary role from one Redis instance to another. Unlike failover which occurs during failures, switchover provides: +- Controlled role transitions +- Minimal downtime (typically a few hundred milliseconds) +- Predictable maintenance windows + +Switchover is ideal for: +- Node maintenance/upgrades +- Workload rebalancing +- Testing high availability +- Planned infrastructure changes + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Redis Cluster + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## Check Roles +List the Pods and their roles (primary or secondary): + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication,apps.kubeblocks.io/component-name=redis -L kubeblocks.io/role +``` + +Example Output: + +```text +NAME READY STATUS RESTARTS AGE ROLE +redis-replication-redis-0 4/4 Running 0 9m59s primary +redis-replication-redis-1 4/4 Running 0 11m secondary +``` + +## Performing a Planned Switchover + +To initiate a planned switchover, create an OpsRequest resource as shown below: + + + + Option 1: Automatic Switchover (No preferred candidate) + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-switchover-ops + namespace: demo + spec: + clusterName: redis-replication + type: Switchover + switchover: + - componentName: redis + instanceName: redis-replication-redis-0 + ``` + **Key Parameters:** + - `instanceName`: Specifies the instance (Pod) that is primary or leader before a switchover operation. 
+ + + + Option 2: Targeted Switchover (Specific candidate) + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-switchover-targeted + namespace: demo + spec: + clusterName: redis-replication + type: Switchover + switchover: + - componentName: redis + # Specifies the instance whose role will be transferred. + # A typical usage is to transfer the leader role in a consensus system. + instanceName: redis-replication-redis-0 + # If CandidateName is specified, the role will be transferred to this instance. + # The name must match one of the pods in the component. + # Refer to ComponentDefinition's Switchover lifecycle action for more details. + candidateName: redis-replication-redis-1 + ``` + + **Key Parameters:** + - `instanceName`: Specifies the instance (Pod) that is primary or leader before a switchover operation. + - `candidateName`: If candidate name is specified, the role will be transferred to this instance. + + + +## Monitoring the Switchover + +Monitor the switchover progress: + +```bash +kubectl get ops redis-switchover-ops -n demo -w +``` + +Expected Result: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +redis-switchover-ops Switchover redis-replication Succeed 1/1 33s +``` +## Verify the Switchover + +After the switchover is executed, the specified instance will be promoted to the primary role, while the previously primary instance will take on the secondary role. + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication,apps.kubeblocks.io/component-name=redis -L kubeblocks.io/role +``` + +Expected Output: + +```text +NAME READY STATUS RESTARTS AGE ROLE +redis-replication-redis-0 4/4 Running 0 19m59s secondary +redis-replication-redis-1 4/4 Running 0 21m primary +``` + +In this example: +- Pod 'redis-replication-redis-1' has been promoted to the primary role. +- Pod 'redis-replication-redis-0' has transitioned to the secondary role. 
+ +## Troubleshooting + +### Common Switchover Issues + +If the switchover operation gets stuck, check these resources: +```bash +# Check agent logs on both current primary and candidate +kubectl logs -n demo -c kbagent +kubectl logs -n demo -c kbagent + +# Check cluster events for errors +kubectl get events -n demo --field-selector involvedObject.name=redis-replication + +# Check kubeblocks logs +kubectl -n kb-system logs deploy/kubeblocks +``` + +## Summary + +This guide demonstrated how to: +1. Deploy a Redis HA cluster +2. Perform both automatic and targeted Switchover +3. Verify role transitions + +**Key takeaways:** +- Switchover enables controlled maintenance with minimal downtime (~100-500ms) +- KubeBlocks provides declarative operations for reliable role transitions +- Always verify: + - Cluster status immediately after switchover + - Application connectivity + - Replication health +- Check logs for troubleshooting: + - KubeBlocks operator (kb-system namespace) + - kbagent on database pods + diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/09-decommission-a-specific-replica.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..87d73387 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,131 @@ +--- +title: Decommission a Specific Pod in KubeBlocks-Managed Redis Clusters +description: Learn how to decommission (take offline) a specific Pod in a Redis cluster managed by KubeBlocks. 
+keywords: [KubeBlocks, Redis, Decommission Pod, Horizontal Scaling, Kubernetes] +sidebar_position: 9 +sidebar_label: Decommission Redis Replica +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + +# Decommission a Specific Pod in KubeBlocks-Managed Redis Clusters + +This guide explains how to decommission (take offline) specific Pods in Redis clusters managed by KubeBlocks. Decommissioning provides precise control over cluster resources while maintaining availability. Use this for workload rebalancing, node maintenance, or addressing failures. + +## Why Decommission Pods with KubeBlocks? + +In traditional StatefulSet-based deployments, Kubernetes lacks the ability to decommission specific Pods. StatefulSets ensure the order and identity of Pods, and scaling down always removes the Pod with the highest ordinal number (e.g., scaling down from 3 replicas removes `Pod-2` first). This limitation prevents precise control over which Pod to take offline, which can complicate maintenance, workload distribution, or failure handling. + +KubeBlocks overcomes this limitation by enabling administrators to decommission specific Pods directly. This fine-grained control ensures high availability and allows better resource management without disrupting the entire cluster. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Redis Cluster + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## Decommission a Pod + +**Expected Workflow**: + +1. Replica specified in `onlineInstancesToOffline` is removed +2. Pod terminates gracefully +3. 
Cluster transitions from `Updating` to `Running` + +To decommission a specific Pod (e.g., 'redis-replication-redis-1'), you can use one of the following methods: + + + + + + Option 1: Using OpsRequest + + Create an OpsRequest to mark the Pod as offline: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-decommission-ops + namespace: demo + spec: + clusterName: redis-replication + type: HorizontalScaling + horizontalScaling: + - componentName: redis + scaleIn: + onlineInstancesToOffline: + - 'redis-replication-redis-1' # Specifies the instance names that need to be taken offline + ``` + + #### Monitor the Decommissioning Process + Check the progress of the decommissioning operation: + + ```bash + kubectl get ops redis-replication-decommission-ops -n demo -w + ``` + Example Output: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-decommission-ops HorizontalScaling redis-replication Succeed 1/1 71s + ``` + + + + + + Option 2: Using Cluster API + + Alternatively, update the Cluster resource directly to decommission the Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: redis + replicas: 1 # explected replicas after decommission + offlineInstances: + - redis-replication-redis-1 # <----- Specify Pod to be decommissioned + ... + ``` + + + + +### Verify the Decommissioning + +After applying the updated configuration, verify the remaining Pods in the cluster: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication +``` + +Example Output: +```bash +NAME READY STATUS RESTARTS AGE +redis-replication-redis-0 3/3 Running 0 33m33s +``` + +## Summary +Key takeaways: +- Traditional StatefulSets lack precise Pod removal control +- KubeBlocks enables targeted Pod decommissioning +- Two implementation methods: OpsRequest or Cluster API + +This provides granular cluster management while maintaining availability. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/_category_.yml new file mode 100644 index 00000000..5c4d52e0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/04-operations/_category_.yml @@ -0,0 +1,4 @@ +position: 4 +label: Operations +collapsible: true +collapsed: false \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/01-create-backuprepo.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/01-create-backuprepo.mdx new file mode 100644 index 00000000..c14f8d53 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/01-create-backuprepo.mdx @@ -0,0 +1,125 @@ +--- +title: Create a Backup Repository for KubeBlocks +description: Learn how to create and configure a BackupRepo for KubeBlocks using an S3 bucket to store backup data. +keywords: [KubeBlocks, Backup, BackupRepo, S3, Kubernetes] +sidebar_position: 1 +sidebar_label: Create BackupRepo +--- + +# Create a BackupRepo for KubeBlocks + +This guide walks you through creating and configuring a BackupRepo in KubeBlocks using an S3 bucket for storing backup data. + +## Prerequisites +- AWS CLI configured with appropriate permissions to create S3 buckets. +- kubectl access to your Kubernetes cluster. +- KubeBlocks installed ([installation guide](../user_docs/overview/install-kubeblocks)) and running in the kb-system namespace. + +## Step 1: Create S3 Bucket + +Use the AWS CLI to create an S3 bucket in your desired region. Replace `` with your target AWS region (e.g., `us-east-1`, `ap-southeast-1`). 
+
+```bash
+ aws s3api create-bucket --bucket kubeblocks-backup-repo --region <your-region> --create-bucket-configuration LocationConstraint=<your-region>
+```
+
+Example (for us-west-1):
+```bash
+aws s3api create-bucket \
+  --bucket kubeblocks-backup-repo \
+  --region us-west-1 \
+  --create-bucket-configuration LocationConstraint=us-west-1
+```
+
+Example Output:
+
+```json
+{
+"Location": "http://kubeblocks-backup-repo.s3.amazonaws.com/"
+}
+```
+
+Verification:
+Confirm the bucket was created by listing its contents (it will be empty initially):
+
+```bash
+aws s3 ls s3://kubeblocks-backup-repo
+```
+
+## Step 2: Create a Kubernetes Secret for AWS Credentials
+
+Store your AWS credentials securely in a Kubernetes Secret. Replace `<ACCESS_KEY>` and `<SECRET_KEY>` with your actual AWS credentials:
+
+```bash
+# Create a secret to save the access key
+kubectl create secret generic s3-credential-for-backuprepo \
+  --from-literal=accessKeyId=<ACCESS_KEY> \
+  --from-literal=secretAccessKey=<SECRET_KEY> \
+  -n kb-system
+```
+
+## Step 3: Configure Backup Repository
+
+A BackupRepo is a custom resource that defines a storage repository for backups. In this step, you'll integrate your S3 bucket with KubeBlocks by creating a BackupRepo resource.
+
+Apply the following YAML to create the BackupRepo. Replace fields (e.g., bucket name, region) with your specific settings.
+
+```yaml
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: BackupRepo
+metadata:
+  name: s3-repo
+  annotations:
+    # mark this backuprepo as default one
+    dataprotection.kubeblocks.io/is-default-repo: 'true'
+spec:
+  # Currently, KubeBlocks supports configuring various object storage services as backup repositories
+  # - s3 (Amazon Simple Storage Service)
+  # - oss (Alibaba Cloud Object Storage Service)
+  # - cos (Tencent Cloud Object Storage)
+  # - gcs (Google Cloud Storage)
+  # - obs (Huawei Cloud Object Storage)
+  # - minio, and other S3-compatible services.
+  storageProviderRef: s3
+  # Specifies the access method of the backup repository.
+ # - Tool + # - Mount + accessMethod: Tool + # Specifies reclaim policy of the PV created by this backup repository. + pvReclaimPolicy: Retain + # Specifies the capacity of the PVC created by this backup repository. + volumeCapacity: 100Gi + # Stores the non-secret configuration parameters for the StorageProvider. + config: + bucket: kubeblocks-backup-repo + endpoint: '' + mountOptions: --memory-limit 1000 --dir-mode 0777 --file-mode 0666 + region: us-west-1 + # References to the secret that holds the credentials for the StorageProvider. + credential: + # name is unique within a namespace to reference a secret resource. + name: s3-credential-for-backuprepo + # namespace defines the space within which the secret name must be unique. + namespace: kb-system +``` + +## Step 4: Verify Backup Repository Status + +Check the status of the BackupRepo to ensure it is correctly initialized: + +```bash +kubectl get backuprepo s3-repo -w +``` + +Expected Status Flow: +```bash +NAME STATUS STORAGEPROVIDER ACCESSMETHOD DEFAULT AGE +s3-repo PreChecking s3 Tool true 5s +s3-repo Ready s3 Tool true 35s +``` + +Troubleshooting: + - If status becomes Failed: + - Verify bucket name and region match your S3 configuration. + - Confirm AWS credentials in the Secret are correct. + - Check network connectivity between KubeBlocks and AWS S3. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/02-create-full-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/02-create-full-backup.mdx new file mode 100644 index 00000000..3cd80267 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/02-create-full-backup.mdx @@ -0,0 +1,215 @@ +--- +title: Create a Full Backup for a Redis Cluster on KubeBlocks +description: Step-by-step guide to creating and validating full backups for Redis clusters using Backup API and OpsRequest API in KubeBlocks. 
+keywords: [Redis, Full Backup, KubeBlocks, Kubernetes, Database Backup, datafile]
+sidebar_position: 2
+sidebar_label: Create Full Backup
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+# Create a Full Backup for Redis on KubeBlocks
+
+This guide demonstrates how to create and validate full backups for Redis clusters on KubeBlocks using the `datafile` method through both:
+- The Backup API (direct backup operations)
+- The OpsRequest API (managed backup operations with enhanced monitoring)
+
+We will cover how to restore data from a backup in the [Restore From Full Backup](./05-restoring-from-full-backup) guide.
+
+## Prerequisites
+
+import Prerequisites from '../_tpl/_prerequisites.mdx'
+
+<Prerequisites />
+
+## Deploy a Redis Cluster
+
+import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx'
+
+<CreateCluster />
+
+## Verifying the Deployment
+
+import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx'
+
+<VerifyCluster />
+
+## Backup Prerequisites
+
+Before creating backups, ensure:
+1. Backup repository is configured:
+   - `BackupRepo` resource exists
+   - Network connectivity between cluster and repository
+   - `BackupRepo` status shows "Ready"
+
+2. Cluster is ready:
+   - Cluster status is "Running"
+   - No ongoing operations (scaling, upgrades, etc.)
+ +## Identify Backup Configuration + +Check available backup policies and schedules: + +```bash +# List backup policies +kubectl get backuppolicy -n demo -l app.kubernetes.io/instance=redis-replication + +# List backup schedules +kubectl get backupschedule -n demo -l app.kubernetes.io/instance=redis-replication +``` + +Expected Output: +```bash +NAME BACKUP-REPO STATUS AGE +redis-replication-redis-backup-policy Available 17m + +NAME STATUS AGE +redis-replication-redis-backup-schedule Available 60m +``` + +View supported backup methods in the BackupPolicy CR 'redis-replication-redis-backup-policy': + +```bash +kubectl get backuppolicy redis-replication-redis-backup-policy -n demo -oyaml | yq '.spec.backupMethods[].name' +``` +**List of Backup methods** + +KubeBlocks Redis supports these backup methods: + +| Feature | Method | Description | +|-------------|--------|------------| +| Full Backup | datafile | Uses `redis-cli BGSAVE` command to backup data | +| Continuous Backup | aof | Continuously perform incremental backups by archiving Append-Only Files (AOF) | + +## Backup via Backup API + +### 1. Create On-Demand Backup + +The `datafile` method uses redis `BGSAVE` command to perform a full backup and upload backup file using `datasafed push`. + +Apply this manifest to create a backup: + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: Backup +metadata: + name: redis-backup-datafile + namespace: demo +spec: + backupMethod: datafile + backupPolicyName: redis-replication-redis-backup-policy + # Determines whether the backup contents stored in the backup repository should be deleted + # when the backup custom resource(CR) is deleted. Supported values are `Retain` and `Delete`. + # - `Retain` means that the backup content and its physical snapshot on backup repository are kept. + # - `Delete` means that the backup content and its physical snapshot on backup repository are deleted. + deletionPolicy: Delete +``` + +### 2. 
Monitor Backup and Verify Completion
+
+Track progress until status shows "Completed":
+
+```bash
+kubectl get backup redis-backup-datafile -n demo -w
+```
+
+Example Output:
+
+```bash
+NAME                    POLICY                                  METHOD     REPO   STATUS      TOTAL-SIZE   DURATION   DELETION-POLICY   CREATION-TIME          COMPLETION-TIME        EXPIRATION-TIME
+redis-backup-datafile   redis-replication-redis-backup-policy   datafile          Completed   3412         10s        Delete            2025-05-17T09:24:59Z   2025-05-17T09:25:08Z
+```
+
+### 3. Validate Backup
+
+Confirm successful completion by checking:
+- Backup status shows "Completed"
+- Backup size matches expectations
+- Check files in the BackupRepo
+
+The `Backup` resource records details including:
+- Storage path
+- Time range
+- Backup file size
+
+
+## Backup via OpsRequest API
+
+### 1. Create On-Demand Backup
+
+Execute a backup using the OpsRequest API with the 'datafile' method:
+
+```yaml
+apiVersion: operations.kubeblocks.io/v1alpha1
+kind: OpsRequest
+metadata:
+  name: redis-replication-backup
+  namespace: demo
+spec:
+  clusterName: redis-replication
+  force: false
+  backup:
+    backupPolicyName: redis-replication-redis-backup-policy
+    backupMethod: datafile
+    deletionPolicy: Delete
+    retentionPeriod: 1mo
+  type: Backup
+```
+
+### 2. Monitor Backup Progress
+
+#### 1. Monitor Operation Status
+
+Track backup progress in real-time:
+```bash
+kubectl get ops redis-replication-backup -n demo -w
+```
+
+Expected Output:
+```bash
+NAME                       TYPE     CLUSTER             STATUS    PROGRESS   AGE
+redis-replication-backup   Backup   redis-replication   Succeed   -/-        35s
+```
+
+- A STATUS of 'Succeed' indicates the backup operation completed successfully.
+
+#### 2. 
Verify Completion + +Check the final backup status: + +```bash +kubectl get backup -n demo -l operations.kubeblocks.io/ops-name=redis-replication-backup +``` + +Example Output: +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +backup-demo-redis-replication-20250517092706 redis-replication-redis-backup-policy datafile Completed 3458 10s Delete 2025-05-17T09:27:06Z 2025-05-17T09:27:16Z 2025-06-16T09:27:16Z +``` + +- The backup status should show 'Completed'. + +### 3. Validate Backup + +Confirm successful completion by checking: +- Backup status shows "Completed" +- Backup size matches expectations +- Files in the BackupRepo + +The `Backup` resource records details including: +- Storage path +- Time range +- Other metadata + +## Summary + +This guide covered: +1. Deploying a replication Redis cluster +2. Creating full backups using: + - Direct Backup API + - Managed OpsRequest API +3. Monitoring and validating backups + +Your Redis data is now securely backed up and ready for restoration when needed. \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/03-scheduled-full-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/03-scheduled-full-backup.mdx new file mode 100644 index 00000000..daa32302 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/03-scheduled-full-backup.mdx @@ -0,0 +1,150 @@ +--- +title: Setting Up a Redis Cluster with Scheduled Backups in KubeBlocks +description: Learn how to deploy a Redis cluster using KubeBlocks and configure automated scheduled backups with retention in an S3 repository. 
+keywords: [Redis, Backup, KubeBlocks, Scheduled Backup, Kubernetes] +sidebar_position: 3 +sidebar_label: Scheduled Backups +--- + + +# Setting Up a Redis Cluster with Scheduled Backups in KubeBlocks + +This guide demonstrates how to deploy a Redis cluster using KubeBlocks and configure scheduled backups with retention in an S3 repository. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploy a Redis Cluster + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## Prerequisites for Backup + +1. Backup Repository Configured: + - Configured `BackupRepo` + - Network connectivity between cluster and repo, `BackupRepo` status is `Ready` + +2. Cluster is Running: + - Cluster must be in `Running` state + - No ongoing operations (scaling, upgrades etc.) + +## Configure Scheduled Backups + +KubeBlocks automatically creates a `BackupSchedule` resource when the cluster is created. Follow these steps to enable and configure scheduled backups: + +1. Verify the default backup schedule configuration: + +```bash +kubectl get backupschedule redis-replication-redis-backup-schedule -n demo -oyaml +``` + +Example Output: +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupSchedule +spec: + backupPolicyName: redis-replication-redis-backup-policy + schedules: + - backupMethod: datafile + # ┌───────────── minute (0-59) + # │ ┌───────────── hour (0-23) + # │ │ ┌───────────── day of month (1-31) + # │ │ │ ┌───────────── month (1-12) + # │ │ │ │ ┌───────────── day of week (0-6) (Sunday=0) + # │ │ │ │ │ + # 0 18 * * * + # schedule this job every day at 6:00 PM (18:00). + cronExpression: 0 18 * * * # update the cronExpression to your need + enabled: false # set to `true` to schedule base backup periodically + retentionPeriod: 7d # set the retention period to your need +``` + +2. 
Enable and customize the backup schedule: +```bash +kubectl edit backupschedule redis-replication-redis-backup-schedule -n demo +``` + +Update these key parameters: +- `enabled`: Set to `true` to activate scheduled backups +- `cronExpression`: Configure backup frequency using cron syntax +- `retentionPeriod`: Set how long to keep backups (e.g., `7d`, `1mo`) + +Example configuration for daily backups at 6PM UTC with 7-day retention: +```yaml +schedules: +- backupMethod: datafile + enabled: true + cronExpression: "0 18 * * *" + retentionPeriod: 7d +``` + +3. Verify the schedule configuration: +```bash +# Check schedule status +kubectl get backupschedule redis-replication-redis-backup-schedule -n demo -w + +# View detailed configuration +kubectl describe backupschedule redis-replication-redis-backup-schedule -n demo +``` + +## Monitoring and Managing Backups + +After enabling scheduled backups, monitor their execution and manage backup retention: + +1. View all backups: +```bash +kubectl get backup -n demo -l app.kubernetes.io/instance=redis-replication +``` + +2. Inspect backup details: +```bash +kubectl describe backup -n demo +``` + +3. Verify backup artifacts: +- Status should show "Completed" +- Check backup size matches expectations +- Confirm retention period is being applied +- Validate backup files exist in repository + +4. Manage backup retention: +- To manually delete old backups: +```bash +kubectl delete backup -n demo +``` +- To modify retention period: +```bash +kubectl edit backupschedule redis-replication-redis-backup-schedule -n demo +``` + +## Cleanup +To remove all created resources, delete the Redis cluster along with its namespace: + +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` + +## Summary + +This guide demonstrated: +1. Configuration of automated Redis backups +2. Schedule customization using cron syntax +3. Retention policy management +4. 
Backup verification procedures + +Your Redis cluster now has: +- Regular automated backups +- Configurable retention policies +- Complete backup history tracking diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/04-scheduled-continuous-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/04-scheduled-continuous-backup.mdx new file mode 100644 index 00000000..a8b95da0 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/04-scheduled-continuous-backup.mdx @@ -0,0 +1,160 @@ +--- +title: Setting Up a Redis Cluster with Scheduled Continuous Backup in KubeBlocks +description: Learn how to set up a Redis cluster with scheduled full backups and continuous incremental backups enabled in KubeBlocks. +keywords: [Redis, Backup, PITR, KubeBlocks, Kubernetes] +sidebar_position: 4 +sidebar_label: Scheduled Continuous Backup +--- + +# Setting Up a Redis Cluster with Scheduled Continuous Backup Enabled in KubeBlocks + +This guide demonstrates how to configure a Redis cluster on KubeBlocks with: + +- Scheduled full backups (base backups) +- Continuous WAL (Write-Ahead Log) archiving +- Point-In-Time Recovery (PITR) capabilities + +This combination provides comprehensive data protection with minimal recovery point objectives (RPO). + +## What is PITR? +Point-In-Time Recovery (PITR) allows you to restore a database to a specific moment in time by combining full backups with continuous binlog/wal/archive log backups. + +For details on restoring data from both full backups and continuous binlog backups, refer to the [Restore From PITR](restore-with-pitr.mdx) guide. + +## Prerequisites + +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. 
Follow the installation instructions here. +- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## Prerequisites for Backup + +1. Backup Repository Configured: + - Configured `BackupRepo` + - Network connectivity between cluster and repo, `BackupRepo` status is `Ready` + +2. Cluster is Running: + - Cluster must be in `Running` state + - No ongoing operations (scaling, upgrades etc.) + +## List of Backup methods + +KubeBlocks Redis supports these backup methods: + +| Feature | Method | Description | +|-------------|--------|------------| +| Full Backup | datafile | Uses `redis-cli BGSAVE` command to backup data | +| Continuous Backup | aof | Continuously perform incremental backups by archiving Append-Only Files (AOF) | + +## Deploy a Redis Cluster + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## Enable Continuous Backup + +### Preparation: set `aof-timestamp-enabled` to `yes` +Redis Append Only Files(AOFs) record every write operation received by the server, in the order they were processed, which allows Redis to reconstruct the dataset by replaying these commands. +KubeBlocks supports continuous backup for the Redis component by archiving Append-Only Files (AOF). It will process incremental AOF files, update base AOF file, purge expired files and save backup status (records metadata about the backup process, such as total size and timestamps, to the `Backup` resource). + +Before enabling a continuous backup, you must set variable `aof-timestamp-enabled` to `yes`. 
+ +```yaml +# cat examples/redis/reconfigure-aof.yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-reconfigure-aof + namespace: demo +spec: + clusterName: redis-replication + reconfigures: + - componentName: redis + parameters: + # Represents the name of the parameter that is to be updated. + - key: aof-timestamp-enabled + value: 'yes' + type: Reconfiguring +``` + +:::note +Once `aof-timestamp-enabled` is on, Redis will include timestamp in the AOF file. +It may have following side effects: storage overhead, performance overhead (write latency). +It is not recommended to enable this feature when you have high write throughput, or you have limited storage space. +::: + +### Update BackupSchedule + +Update `BackupSchedule` to schedule enable(`enabled`) backup methods and set the time (`cronExpression`) to your need: + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupSchedule +spec: + backupPolicyName: redis-replication-redis-backup-policy + schedules: + - backupMethod: datafile + # ┌───────────── minute (0-59) + # │ ┌───────────── hour (0-23) + # │ │ ┌───────────── day of month (1-31) + # │ │ │ ┌───────────── month (1-12) + # │ │ │ │ ┌───────────── day of week (0-6) (Sunday=0) + # │ │ │ │ │ + # 0 18 * * * + # schedule this job every day at 6:00 PM (18:00). + cronExpression: 0 18 * * * # update the cronExpression to your need + enabled: true # set to `true` to schedule base backup periodically + retentionPeriod: 7d # set the retention period to your need + - backupMethod: aof + cronExpression: '*/30 * * * *' + enabled: true # set to `true` to enable continuous backup + name: aof + retentionPeriod: 8d # by default, retentionPeriod of continuous backup is 1d more than that of a full backup. +``` + +1. **Full Backups** (datafile): + - Use redis `BGSAVE` command to perform a full backup + - Runs on configured schedule (daily by default) + - Serves as base for PITR + +2. 
**Continuous Backups** (aof):
+   - Continuously processing incremental AOF files, update base AOF file, purge expired files
+   - Maintains backup metadata including size and time ranges
+
+## Monitoring Continuous Backups
+
+Verify continuous backup operation with these commands:
+```bash
+# get continuous backup
+kubectl get backup -l app.kubernetes.io/instance=redis-replication,dataprotection.kubeblocks.io/backup-type=Continuous -n demo
+# get pod working for continuous backup
+kubectl get pod -l app.kubernetes.io/instance=redis-replication,dataprotection.kubeblocks.io/backup-type=Continuous -n demo
+```
+
+## Summary
+
+This guide covered:
+1. Configuring scheduled full backups with the `datafile` method
+2. Enabling continuous AOF archiving with the `aof` method
+3. Setting up Point-In-Time Recovery (PITR) capabilities
+4. Monitoring backup operations
+
+Key Benefits:
+- Scheduled full backups ensure regular recovery points
+- Continuous AOF archiving minimizes potential data loss
+- PITR enables recovery to any moment in time
\ No newline at end of file
diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/05-restoring-from-full-backup.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/05-restoring-from-full-backup.mdx
new file mode 100644
index 00000000..5009c4fe
--- /dev/null
+++ b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/05-restoring-from-full-backup.mdx
@@ -0,0 +1,179 @@
+---
+title: Restore a Redis Cluster from Backup
+description: Learn how to restore a new Redis cluster from an existing backup in KubeBlocks using the Cluster Annotation or OpsRequest API.
+keywords: [Redis, Restore, Backup, KubeBlocks, Kubernetes]
+sidebar_position: 5
+sidebar_label: Restore Redis Cluster
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Restore a Redis Cluster from Backup
+
+This guide demonstrates two methods to restore a Redis cluster from backup in KubeBlocks:
+
+1. 
**Cluster Annotation Method** - Simple declarative approach using YAML annotations +2. **OpsRequest API Method** - Enhanced operational control with progress monitoring + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Preparing for Restoration: Locate one Full Backup +Before restoring, ensure that there is a full backup available. The restoration process will use this backup to create a new Redis cluster. + +- Backup repository accessible from new cluster +- Valid full backup in `Completed` state +- Adequate CPU/memory resources +- Sufficient storage capacity + +Find available full backups: + +```bash +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=redis-replication # get the list of full backups +``` + +Pick ONE of the Backups whose status is `Completed`. + +## Option 1: Cluster Annotation Restoration + +### Step 1: Create Restored Cluster +Create a new cluster with restore configuration: + +Key parameters: +- `kubeblocks.io/restore-from-backup` annotation +- Backup name and namespace located from the previous steps + + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-replication-restore + namespace: demo + annotations: + kubeblocks.io/restore-from-backup: '{"redis":{"name":"","namespace":"demo","volumeRestorePolicy":"Parallel"}}' +spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" 
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 20Gi
+```
+
+### Step 2: Monitor Restoration
+Track restore progress with:
+
+```bash
+# Watch restore status
+kubectl get restore -n demo -w
+
+# Watch cluster status
+kubectl get cluster -n demo -w
+```
+
+## Option 2: OpsRequest API Restoration
+
+### Step 1: Initiate Restore Operation
+Create restore request via OpsRequest API:
+
+```yaml
+apiVersion: operations.kubeblocks.io/v1alpha1
+kind: OpsRequest
+metadata:
+  name: redis-replication-restore
+  namespace: demo
+spec:
+  clusterName: redis-replication-restored
+  force: false
+  restore:
+    backupName: <full-backup-name>
+    backupNamespace: demo
+  type: Restore
+```
+
+### Step 2: Track Operation Progress
+Monitor restore status:
+
+```bash
+# Watch restore status
+kubectl get restore -n demo -w
+
+# Watch cluster status
+kubectl get cluster -n demo -w
+```
+
+### Step 3: Validate Restored Cluster
+Confirm successful restoration:
+```bash
+kubectl get cluster redis-replication-restored -n demo
+```
+Example Output:
+```bash
+NAME                         CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS    AGE
+redis-replication-restored   redis                Delete               Running   3m2s
+```
+
+
+## Cleanup
+To remove all created resources, delete the Redis cluster along with its namespace:
+
+```bash
+kubectl delete cluster redis-replication -n demo
+kubectl delete cluster redis-replication-restored -n demo
+kubectl delete ns demo
+```
+
+## Summary
+
+This guide covered two restoration methods:
+
+1. **Cluster Annotation** - Simple YAML-based approach
+   - Retrieve system credentials
+   - Create cluster with restore annotation
+   - Monitor progress
+
+2. 
**OpsRequest API** - Enhanced operational control + - Create restore request + - Track operation status + - Verify completion diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx new file mode 100644 index 00000000..0248dc19 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx @@ -0,0 +1,200 @@ +--- +title: Restore a Redis Cluster from Backup with Point-In-Time-Recovery(PITR) on KubeBlocks +description: Learn how to restore a Redis cluster using a full backup and continuous binlog backup for Point-In-Time Recovery (PITR) on KubeBlocks. +keywords: [Redis, Full Backup, PITR, KubeBlocks] +sidebar_position: 6 +sidebar_label: Restore with PITR +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Restore a Redis Cluster from Backup with Point-In-Time-Recovery(PITR) on KubeBlocks + +This guide demonstrates how to perform Point-In-Time Recovery (PITR) for Redis clusters in KubeBlocks using: + +1. A full base backup +2. Continuous WAL (Write-Ahead Log) backups +3. Two restoration methods: + - Cluster Annotation (declarative approach) + - OpsRequest API (operational control) + +PITR enables recovery to any moment within the `timeRange` specified. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Prepare for PITR Restoration +To perform a PITR restoration, both a full backup and continuous backup are required. Refer to the documentation to configure these backups if they are not already set up. + +- Completed full backup +- Active continuous WAL backup +- Backup repository accessible +- Sufficient resources for new cluster + +To identify the list of full and continuous backups, you may follow the steps: + +### 1. 
Verify Continuous Backup +Confirm you have a continuous WAL backup, either running or completed: + +```bash +# expect EXACTLY ONE continuous backup per cluster +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Continuous,app.kubernetes.io/instance=redis-replication +``` + +### 2. Check Backup Time Range +Get the valid recovery window: + +```bash +kubectl get backup -n demo -o yaml | yq '.status.timeRange' +``` + +Expected Output: +```text +start: "2025-05-07T09:12:47Z" +end: "2025-05-07T09:22:50Z" +``` + +### 3. Identify Full Backup +Find available full backups that meet: +- Status: Completed +- Completion time after continuous backup start time + +```bash +# expect one or more Full backups +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=redis-replication +``` + +:::tip +KubeBlocks automatically selects the most recent qualifying full backup as the base. +Make sure there is a full backup meets the condition: its `stopTime`/`completionTimestamp` must **AFTER** Continuous backup's `startTime`, otherwise PITR restoration will fail. +::: + +## Option 1: Cluster Annotation Restoration + +### Step 1: Create Restored Cluster +Configure PITR parameters in cluster annotation: + +Key parameters: +- `name`: Continuous backup name +- `restoreTime`: Target recovery time (within backup `timeRange`) + +Apply this YAML configuration: +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-restore-pitr + namespace: demo + annotations: + # NOTE: replace with the continuouse backup name + # NOTE: replace with a valid time within the backup timeRange. 
+ kubeblocks.io/restore-from-backup: '{"redis":{"name":"","namespace":"demo","restoreTime":"","volumeRestorePolicy":"Parallel"}}' +spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### Step 3: Monitor Restoration +Track restore progress with: + +```bash +# Watch restore status +kubectl get restore -n demo -w + +# Watch cluster status +kubectl get cluster -n demo -w +``` + +## Option 2: OpsRequest API Restoration + +For operational control and monitoring, use the OpsRequest API: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-replication-restore + namespace: demo +spec: + clusterName: redis-restore-pitr + force: false + restore: + backupName: + backupNamespace: demo + restorePointInTime: + type: Restore +``` + +### Monitor Restoration +Track progress with: + +```bash +# Watch restore operation +kubectl get restore -n demo -w + +# Verify cluster status +kubectl get cluster -n demo -w +``` + +## Cleanup +To remove all created resources, delete the Redis cluster along with its namespace: + +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete cluster redis-restore-pitr -n demo +kubectl delete ns demo +``` + +## Summary +This guide demonstrated how to restore a Redis cluster in KubeBlocks using a full backup and continuous backup for Point-In-Time Recovery 
(PITR). Key steps included: +- Verifying available backups. +- Extracting encrypted system account credentials. +- Creating a new Redis cluster with restoration configuration. +- Monitoring the restoration process. + +With this approach, you can restore a Redis cluster to a specific point in time, ensuring minimal data loss and operational continuity. + diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/_category_.yml new file mode 100644 index 00000000..cd4faeaf --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/05-backup-restore/_category_.yml @@ -0,0 +1,4 @@ +position: 5 +label: Backup And Restores +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/06-custom-secret/01-custom-secret.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/06-custom-secret/01-custom-secret.mdx new file mode 100644 index 00000000..afd9e9a5 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/06-custom-secret/01-custom-secret.mdx @@ -0,0 +1,154 @@ +--- +title: Create a Redis Cluster with a Custom Root Password on KubeBlocks +description: Learn how to deploy a Redis cluster on KubeBlocks with a custom root password securely configured using Kubernetes Secrets. +keywords: [Redis, KubeBlocks, Custom Password, Kubernetes, Secrets] +sidebar_position: 1 +sidebar_label: Custom Password +--- + +# Create Redis Cluster With Custom Password on KubeBlocks + +This guide demonstrates how to deploy a Redis cluster in KubeBlocks with a custom root password stored in a Kubernetes Secret. + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Deploying the Redis Replication Cluster + +KubeBlocks uses a declarative approach for managing Redis clusters. Below is an example configuration for deploying a Redis cluster with 2 nodes (1 primary, 1 replicas) and a custom root password. 
+
+### Step 1: Create a Secret for the Default Account
+
+The custom password for the 'default' user is stored in a Kubernetes Secret. Create the Secret by applying the following YAML:
+
+```yaml
+apiVersion: v1
+data:
+  password: Y3VzdG9tcGFzc3dvcmQ= # custompassword
+  username: ZGVmYXVsdA== # default
+immutable: true
+kind: Secret
+metadata:
+  name: custom-secret
+  namespace: demo
+```
+- password: Replace custompassword with your desired password and encode it using Base64 (`echo -n "custompassword" | base64`).
+- username: The default Redis user is 'default', encoded as 'ZGVmYXVsdA==' (`echo -n "default" | base64`).
+
+
+### Step 2: Deploy the Redis Cluster
+
+Apply the following manifest to deploy the Redis cluster, referencing the Secret created in Step 1 for the 'default' account:
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: redis-replication
+  namespace: demo
+spec:
+  terminationPolicy: Delete
+  clusterDef: redis
+  topology: replication
+  componentSpecs:
+    - name: redis
+      serviceVersion: "7.2.4"
+      disableExporter: false
+      replicas: 2
+      systemAccounts: # override systemaccount password
+        - name: default
+          secretRef:
+            name: custom-secret
+            namespace: demo
+      resources:
+        limits:
+          cpu: '0.5'
+          memory: 0.5Gi
+        requests:
+          cpu: '0.5'
+          memory: 0.5Gi
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            storageClassName: ""
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 20Gi
+    - name: redis-sentinel
+      replicas: 3
+      resources:
+        limits:
+          cpu: '0.5'
+          memory: 0.5Gi
+        requests:
+          cpu: '0.5'
+          memory: 0.5Gi
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            storageClassName: ""
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 20Gi
+```
+**Explanation of Key Fields**
+- `systemAccounts`: Overrides system accounts defined in the referenced `ComponentDefinition`.
+
+:::tip
+
+In the KubeBlocks Redis Addon, a list of system accounts is defined. Only the accounts in that list can be customized with a new secret.
+ +::: + +To get the of accounts: +```bash +kubectl get cmpd redis-7-1.0.0 -oyaml | yq '.spec.systemAccounts[].name' +``` + +Expected Output: +```bash +default +``` + +## Verifying the Deployment + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## Connecting to the Redis Cluster + +KubeBlocks automatically creates a secret containing the Redis root credentials. Retrieve the credentials with the following commands: + +```bash +kubectl get secrets -n demo redis-replication-redis-account-default -o jsonpath='{.data.password}' | base64 -d +custompassword +``` + +To connect to the cluster's primary node, use the Redis client with the custom password: +```bash +kubectl exec -it -n demo redis-replication-redis-0 -c redis -- reids-cli -a ${PASSWD} +``` + +## Cleanup +To remove all created resources, delete the Redis cluster along with its namespace: + +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete secret custom-secret -n demo +kubectl delete ns demo +``` + +## Summary +In this guide, you: +- Created a Kubernetes Secret to securely store a custom Redis default password. +- Deployed a Redis cluster in KubeBlocks with a custom root password. +- Verified the deployment and connected to the cluster's primary node using the Redis client. + +Using Kubernetes Secrets ensures secure credential management for your Redis clusters, while KubeBlocks simplifies the deployment and management process. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/06-custom-secret/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-redis/06-custom-secret/_category_.yml new file mode 100644 index 00000000..bf29dd85 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/06-custom-secret/_category_.yml @@ -0,0 +1,4 @@ +position: 6 +label: Custom Secret +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..f4b8b443 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,263 @@ +--- +title: Observability for Redis Clusters with the Prometheus Operator +description: Learn how to set up observability for Redis Clusters in KubeBlocks using the Prometheus Operator. Configure monitoring and visualize metrics with Grafana. +keywords: [KubeBlocks, Redis, Prometheus, Grafana, Observability, Metrics] +sidebar_position: 2 +sidebar_label: Observability for Redis Clusters +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Redis Monitoring with Prometheus Operator + +This guide demonstrates how to configure comprehensive monitoring for Redis clusters in KubeBlocks using: + +1. Prometheus Operator for metrics collection +2. Built-in Redis exporter for metrics exposure +3. Grafana for visualization + +## Prerequisites + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## Install Monitoring Stack + +### 1. 
Install Prometheus Operator +Deploy the kube-prometheus-stack using Helm: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. Verify Installation +Check all components are running: +```bash +kubectl get pods -n monitoring +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + + +## Deploy a Redis Cluster + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +**Key Monitoring Configuration** +- `disableExporter: false` enables the built-in metrics exporter +- Exporter runs as sidecar container in each Redis pod +- Scrapes Redis metrics on port 9187 + +## Verifying the Deployment +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster redis-replication -n demo -w +``` + +Example Output: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +redis-replication redis Delete Creating 50s +redis-replication redis Delete Running 4m2s +``` +Once the cluster status becomes Running, your Redis cluster is ready for use. + +## Configure Metrics Collection + +### 1. 
Verify Exporter Endpoint +Confirm metrics are exposed: + +```bash +kubectl get po redis-replication-redis-0 -n demo -oyaml | \ + yq '.spec.containers[] | select(.name=="metrics") | .ports' +``` + +Example Output: +```yaml +- containerPort: 9121 + name: http-metrics # Used in PodMonitor + protocol: TCP +``` + +Test metrics endpoint: + +```bash +kubectl -n demo exec -it pods/redis-replication-redis-0 -c metrics -- \ + curl -s http://127.0.0.1:9121/metrics | head -n 50 +``` + +### 2. Create PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: redis-replication-pod-monitor + namespace: demo + labels: # Must match the setting in 'prometheus.spec.podMonitorSelector' + release: prometheus +spec: + jobLabel: app.kubernetes.io/managed-by + # defines the labels which are transferred from the + # associated Kubernetes 'Pod' object onto the ingested metrics + # set the lables w.r.t you own needs + podTargetLabels: + - app.kubernetes.io/instance + - app.kubernetes.io/managed-by + - apps.kubeblocks.io/component-name + - apps.kubeblocks.io/pod-name + podMetricsEndpoints: + - path: /metrics + port: http-metrics # Must match exporter port name + scheme: http + namespaceSelector: + matchNames: + - demo # Target namespace + selector: + matchLabels: + app.kubernetes.io/instance: redis-replication + apps.kubeblocks.io/component-name: redis +``` +**PodMonitor Configuration Guide** + +| Parameter | Required | Description | +|-----------|----------|-------------| +| `port` | Yes | Must match exporter port name ('http-metrics') | +| `namespaceSelector` | Yes | Targets namespace where Redis runs | +| `labels` | Yes | Must match Prometheus's podMonitorSelector | +| `path` | No | Metrics endpoint path (default: /metrics) | +| `interval` | No | Scraping interval (default: 30s) | + +## Verify Monitoring Setup + +### 1. 
Check Prometheus Targets +Forward and access Prometheus UI: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +Open your browser and navigate to: +http://localhost:9090/targets + +Check if there is a scrape job corresponding to the PodMonitor (the job name is 'demo/redis-replication-pod-monitor'). + +Expected State: +- The State of the target should be UP. +- The target's labels should include the ones defined in podTargetLabels (e.g., 'app_kubernetes_io_instance'). + +### 2. Test Metrics Collection +Verify metrics are being scraped: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=redis_up{app_kubernetes_io_instance="redis-replication"}' | jq +``` + +Example Output: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "redis_up", + "app_kubernetes_io_instance": "redis-replication", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "redis", + "apps_kubeblocks_io_pod_name": "redis-replication-redis-1", + "container": "metrics", + "endpoint": "http-metrics", + "instance": "10.244.0.233:9121", + "job": "kubeblocks", + "namespace": "demo", + "pod": "redis-replication-redis-1" + }, + "value": [ + 1747475968.165, + "1" + ] + }, + { + "metric": { + "__name__": "redis_up", + "app_kubernetes_io_instance": "redis-replication", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "redis", + "apps_kubeblocks_io_pod_name": "redis-replication-redis-0", + "container": "metrics", + "endpoint": "http-metrics", + "instance": "10.244.0.231:9121", + "job": "kubeblocks", + "namespace": "demo", + "pod": "redis-replication-redis-0" + }, + "value": [ + 1747475968.165, + "1" + ] + } + ] + } +} +``` +## Visualize in Grafana + +### 1. 
Access Grafana +Port-forward and login: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +Open your browser and navigate to http://localhost:3000. Use the default credentials to log in: +- Username: 'admin' +- Password: 'prom-operator' (default) + +### 2. Import Dashboard +Import the KubeBlocks Redis dashboard: + +1. In Grafana, navigate to "+" → "Import" +2. Choose one of these methods: + - Paste the dashboard URL: + `https://raw.githubusercontent.com/apecloud/kubeblocks-addons/main/addons/redis/dashboards/redis.json` + - Or upload the JSON file directly + +**Dashboard Includes:** +- Cluster status overview +- Query performance metrics +- Connection statistics +- Replication health + +![redis-monitoring-grafana-dashboard.png](/img/docs/en/redis-monitoring-grafana-dashboard.png) + + +## Delete +To delete all the created resources, run the following commands: +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +kubectl delete podmonitor redis-replication-pod-monitor -n demo +``` + +## Summary +In this tutorial, we set up observability for a Redis cluster in KubeBlocks using the Prometheus Operator. +By configuring a `PodMonitor`, we enabled Prometheus to scrape metrics from the Redis exporter. +Finally, we visualized these metrics in Grafana. This setup provides valuable insights for monitoring the health and performance of your Redis databases. 
\ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/08-monitoring/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-redis/08-monitoring/_category_.yml new file mode 100644 index 00000000..6953a6d9 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +position: 8 +label: Monitoring +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/09-faqs.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/09-faqs.mdx new file mode 100644 index 00000000..cefd7d5a --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/09-faqs.mdx @@ -0,0 +1,114 @@ +--- +title: FAQs +description: FAQs of Redis +keywords: [KubeBlocks, Redis, Kubernetes Operator] +sidebar_position: 9 +sidebar_label: FAQs +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Redis FAQs + +## 1. How to disable auth for 'default' user + +To disable auth for 'default' user, you can unset the environment variable `REDIS_DEFAULT_PASSWORD` when creating the cluster. 
+```bash +# unset REDIS_DEFAULT_PASSWORD to disable auth for 'default' user +env: + - name: REDIS_DEFAULT_PASSWORD +``` + +for instance, when creating a replication cluster without auth, you use the following yaml: +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-replication-without-auth + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + componentSpecs: + - name: redis + env: + - name: REDIS_DEFAULT_PASSWORD # unset this env var to disable auth for default user + value: "" + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + serviceVersion: "7.2.4" + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +Similarly, you can disable auth for 'default' user when creating a sharding cluster: +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-sharding-without-auth + namespace: demo +spec: + terminationPolicy: Delete + shardings: + - name: shard + shards: 3 + template: + name: redis + componentDef: redis-cluster-7 + replicas: 2 + env: + - name: REDIS_DEFAULT_PASSWORD # unset this env var to disable auth for default user + value: "" + resources: + limits: + cpu: '1' + memory: 1Gi + requests: + cpu: '1' + memory: 1Gi + serviceVersion: 7.2.4 + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/_category_.yml 
b/docs/en/release-1_0_1/kubeblocks-for-redis/_category_.yml new file mode 100644 index 00000000..9f8961de --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/_category_.yml @@ -0,0 +1,4 @@ +position: 12 +label: KubeBlocks for Redis Community Edition +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_category_.yml new file mode 100644 index 00000000..753acb79 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_category_.yml @@ -0,0 +1,5 @@ +position: 100 +label: tpl +collapsible: true +collapsed: false +hidden: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_create-redis-replication-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_create-redis-replication-cluster.mdx new file mode 100644 index 00000000..c6196598 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_create-redis-replication-cluster.mdx @@ -0,0 +1,55 @@ +KubeBlocks uses a declarative approach for managing Redis Replication Clusters. +Below is an example configuration for deploying a Redis Replication Cluster with two components, redis and redis sentinel. 
+ +Apply the following YAML configuration to deploy the cluster: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-replication + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_prerequisites.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..e632dc41 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +Before proceeding, ensure the following: +- Environment Setup: + - A Kubernetes cluster is up and running. + - The kubectl CLI tool is configured to communicate with your cluster. + - [KubeBlocks CLI](../../user_docs/references/install-kbcli) and [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks) are installed. Follow the installation instructions here. 
+- Namespace Preparation: To keep resources isolated, create a dedicated namespace for this tutorial: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_verify-redis-replication-cluster.mdx b/docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_verify-redis-replication-cluster.mdx new file mode 100644 index 00000000..7745e40a --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-redis/_tpl/_verify-redis-replication-cluster.mdx @@ -0,0 +1,33 @@ +Monitor the cluster status until it transitions to the Running state: +```bash +kubectl get cluster redis-replication -n demo -w +``` + +Expected Output: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +redis-replication redis Delete Running 3m49s +``` + +Check the pod status and roles: +```bash +kubectl get pods -l app.kubernetes.io/instance=redis-replication -L kubeblocks.io/role -n demo +``` + +Expected Output: +```bash +NAME READY STATUS RESTARTS AGE ROLE +redis-replication-redis-0 3/3 Running 0 3m38s primary +redis-replication-redis-1 3/3 Running 0 3m16s secondary +redis-replication-redis-sentinel-0 2/2 Running 0 4m35s +redis-replication-redis-sentinel-1 2/2 Running 0 4m17s +redis-replication-redis-sentinel-2 2/2 Running 0 3m59s +``` + +Once the cluster status becomes Running, your Redis cluster is ready for use. + +:::tip +If you are creating the cluster for the very first time, it may take some time to pull images before running. + +::: diff --git a/docs/en/release-1_0_1/kubeblocks-for-starrocks/01-introduction.mdx b/docs/en/release-1_0_1/kubeblocks-for-starrocks/01-introduction.mdx new file mode 100644 index 00000000..d7f2dca3 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-starrocks/01-introduction.mdx @@ -0,0 +1,35 @@ +--- +title: Starrocks +description: KubeBlocks for Starrocks, an alternative to dedicated operator. 
+keywords: [starrocks, analytic, data warehouse, control plane, operator] +sidebar_position: 1 +sidebar_label: Starrocks +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# StarRocks + +StarRocks is a Linux Foundation project, it is the next-generation data platform designed to make data-intensive real-time analytics fast and easy. + +StarRocks supports **shared-nothing** (Each BE has a portion of the data on its local storage) and **shared-data** (all data on object storage or HDFS and each CN has only cache on local storage). + +- FrontEnds (FE) are responsible for metadata management, client connection management, query planning, and query scheduling. Each FE stores and maintains a complete copy of the metadata in its memory, which guarantees indiscriminate services among the FEs. +- BackEnds (BE) are responsible for data storage, data processing, and query execution. Each BE stores a portion of the data and processes the queries in parallel. + +KubeBlocks supports creating a **shared-nothing** StarRocks cluster. + +## Supported Features + +### Lifecycle Management + +| Topology | Horizontal
scaling | Vertical
scaling | Expand
volume | Restart | Stop/Start | Configure | Expose | Switchover | +|------------------|------------------------|-----------------------|-------------------|-----------|------------|-----------|--------|------------| +| shared-nothing | Yes | Yes | Yes | Yes | Yes | No | Yes | N/A | + +### Versions + +| Major Versions | Description | +|----------------|-------------| +| 3.3.x | 3.3.0, 3.3.2| diff --git a/docs/en/release-1_0_1/kubeblocks-for-starrocks/02-provision.mdx b/docs/en/release-1_0_1/kubeblocks-for-starrocks/02-provision.mdx new file mode 100644 index 00000000..78ff7149 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-starrocks/02-provision.mdx @@ -0,0 +1,92 @@ +--- +title: Provision +description: How to Provision StarRocks on KubeBlocks +keywords: [starrocks, analytic, data warehouse, control plane] +sidebar_position: 2 +sidebar_label: Provision +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Before you start + +- [Install kbcli](./../installation/install-kbcli.md) if you want to manage the StarRocks cluster with `kbcli`. +- [Install KubeBlocks](./../installation/install-kubeblocks.md). +- [Install and enable the starrocks Addon](./../installation/install-addons.md). +- To keep things isolated, create a separate namespace called `demo` throughout this tutorial. + + ```bash + kubectl create namespace demo + ``` + +## Create a cluster + +KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a StarRocks cluster. If you only have one node for deploying a cluster with multiple replicas, configure the cluster affinity by setting `spec.schedulingPolicy` or `spec.componentSpecs.schedulingPolicy`. For details, you can refer to the [API docs](../user_docs/references/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy). But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability. 
+ +```yaml +cat < + + + +```bash +kubectl get cluster mycluster -n demo +> +NAME CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS AGE +mycluster starrocks starrocks-3.1.1 Delete Running 4m29s +``` + + + + + +```bash +kbcli cluster list mycluster -n demo +> +NAME NAMESPACE CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS CREATED-TIME +mycluster demo starrocks starrocks-3.1.1 Delete Running Jul 17,2024 19:06 UTC+0800 +``` + + + + + +#### Steps + + + + + +1. Apply an OpsRequest to the specified cluster. Configure the parameters according to your needs. + + ```bash + kubectl apply -f - < + NAMESPACE NAME TYPE CLUSTER STATUS PROGRESS AGE + demo mycluster-vertical-scaling VerticalScaling mycluster Succeed 3/3 6m + ``` + + If an error occurs, you can troubleshoot with `kubectl describe ops -n demo` command to view the events of this operation. + +3. Check whether the corresponding resources change. + + ```bash + kubectl describe cluster mycluster -n demo + ``` + + + + + +1. Change the configuration of `spec.componentSpecs.resources` in the YAML file. `spec.componentSpecs.resources` controls the requirement and limit of resources and changing them triggers a vertical scaling. + + ```bash + kubectl edit cluster mycluster -n demo + ``` + + Edit the values of `spec.componentSpecs.resources`. + + ```yaml + ... + spec: + clusterDefinitionRef: starrocks-ce + clusterVersionRef: starrocks-ce-3.1.1 + componentSpecs: + - name: fe + componentDefRef: fe + replicas: 2 + resources: # Change the values of resources + requests: + memory: "2Gi" + cpu: "1" + limits: + memory: "4Gi" + cpu: "2" + ... + ``` + +2. Check whether the corresponding resources change. + + ```bash + kubectl describe cluster mycluster -n demo + ``` + + + + + +1. Set the `--cpu` and `--memory` values according to your needs and run the following command to perform vertical scaling. 
+ + ```bash + kbcli cluster vscale mycluster -n demo --cpu=2 --memory=20Gi --components=be + ``` + + Please wait a few seconds until the scaling process is over. + +2. Validate the vertical scaling operation. + + - View the OpsRequest progress. + + KubeBlocks outputs a command automatically for you to view the OpsRequest progress. The output includes the status of this OpsRequest and Pods. When the status is `Succeed`, this OpsRequest is completed. + + ```bash + kbcli cluster describe-ops mycluster-verticalscaling-smx8b -n demo + ``` + + - Check the cluster status. + + ```bash + kbcli cluster list mycluster -n demo + > + NAME NAMESPACE CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS CREATED-TIME + mycluster demo starrocks starrocks-3.1.1 Delete Updating Jul 17,2024 19:06 UTC+0800 + ``` + + - STATUS=Updating: it means the vertical scaling is in progress. + - STATUS=Running: it means the vertical scaling operation has been applied. + - STATUS=Abnormal: it means the vertical scaling is abnormal. The reason may be that the number of the normal instances is less than that of the total instance or the leader instance is running properly while others are abnormal. + > To solve the problem, you can manually check whether this error is caused by insufficient resources. Then if AutoScaling is supported by the Kubernetes cluster, the system recovers when there are enough resources. Otherwise, you can create enough resources and troubleshoot with `kubectl describe` command. + +3. After the OpsRequest status is `Succeed` or the cluster status is `Running` again, check whether the corresponding resources change. + + ```bash + kbcli cluster describe mycluster -n demo + ``` + + + + + +### Scale horizontally + +Horizontal scaling changes the amount of pods. For example, you can scale out replicas from three to five. 
+ +From v0.9.0, besides replicas, KubeBlocks also supports scaling in and out instances, refer to the [Horizontal Scale tutorial](./../maintenance/scale/horizontal-scale.md) for more details and examples. + +#### Before you start + +Check whether the cluster status is `Running`. Otherwise, the following operations may fail. + + + + + +```bash +kubectl get cluster mycluster -n demo +> +NAME CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS AGE +mycluster starrocks starrocks-3.1.1 Delete Running 4m29s +``` + + + + + +```bash +kbcli cluster list mycluster -n demo +> +NAME NAMESPACE CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS CREATED-TIME +mycluster demo starrocks starrocks-3.1.1 Delete Running Jul 17,2024 19:06 UTC+0800 +``` + + + + + +#### Steps + + + + + +1. Apply an OpsRequest to a specified cluster. Configure the parameters according to your needs. + + The example below means adding two replicas for the component `fe`. + + ```bash + kubectl apply -f - < + NAMESPACE NAME TYPE CLUSTER STATUS PROGRESS AGE + demo mycluster-horizontal-scaling HorizontalScaling mycluster Succeed 3/3 6m + ``` + + If an error occurs, you can troubleshoot with `kubectl describe ops -n demo` command to view the events of this operation. + +3. Check whether the corresponding resources change. + + ```bash + kubectl describe cluster mycluster -n demo + ``` + + + + + +1. Change the configuration of `spec.componentSpecs.replicas` in the YAML file. `spec.componentSpecs.replicas` stands for the pod amount and changing this value triggers a horizontal scaling of a cluster. + + ```bash + kubectl edit cluster mycluster -n demo + ``` + + Edit the values of `spec.componentSpecs.replicas`. + + ```yaml + ... + spec: + clusterDefinitionRef: starrocks-ce + clusterVersionRef: starrocks-ce-3.1.1 + componentSpecs: + - name: fe + componentDefRef: fe + replicas: 2 # Change this value + ... + ``` + +2. Check whether the corresponding resources change. 
+ + ```bash + kubectl describe cluster mycluster -n demo + ``` + + + + + +1. Configure the parameters `--components` and `--replicas`, and run the command. + + ```bash + kbcli cluster hscale mycluster --replicas=3 --components=be -n demo + ``` + + - `--components` describes the component name ready for horizontal scaling. + - `--replicas` describes the replica amount of the specified components. Edit the amount based on your demands to scale in or out replicas. + + Please wait a few seconds until the scaling process is over. + +2. Validate the vertical scaling. + + - View the OpsRequest progress. + + KubeBlocks outputs a command automatically for you to view the OpsRequest progress. The output includes the status of this OpsRequest and Pods. When the status is `Succeed`, this OpsRequest is completed. + + ```bash + kbcli cluster describe-ops mycluster-horizontalscaling-ffp9p -n demo + ``` + + - View the cluster status. + + ```bash + kbcli cluster list mycluster -n demo + ``` + + - STATUS=Updating: it means horizontal scaling is in progress. + - STATUS=Running: it means horizontal scaling has been applied. + +3. After the OpsRequest status is `Succeed` or the cluster status is `Running` again, check whether the corresponding resources change. + + ```bash + kbcli cluster describe mycluster -n demo + ``` + + + + diff --git a/docs/en/release-1_0_1/kubeblocks-for-starrocks/04-stop-and-start.mdx b/docs/en/release-1_0_1/kubeblocks-for-starrocks/04-stop-and-start.mdx new file mode 100644 index 00000000..8c2bbdb4 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-starrocks/04-stop-and-start.mdx @@ -0,0 +1,197 @@ +--- +title: Stop/Start +description: How to Stop/Start StarRocks on KubeBlocks +keywords: [starrocks, analytic, data warehouse, control plane] +sidebar_position: 4 +sidebar_label: Stop/Start +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Stop/Start a cluster + +You can stop/start a cluster to save computing resources. 
When a cluster is stopped, the computing resources of this cluster are released, which means the pods of Kubernetes are released, but the storage resources are reserved. You can start this cluster again by snapshots if you want to restore the cluster resources. + +### Stop a cluster + +1. Configure the name of your cluster and run the command below to stop this cluster. + + + + + + Apply an OpsRequest to restart a cluster. + + ```bash + kubectl apply -f - < + + + + ```bash + kubectl edit cluster mycluster -n demo + ``` + + Configure replicas as 0 to delete pods. + + ```yaml + ... + spec: + clusterDefinitionRef: starrocks-ce + clusterVersionRef: starrocks-ce-3.1.1 + terminationPolicy: Delete + affinity: + podAntiAffinity: Preferred + topologyKeys: + - kubernetes.io/hostname + tolerations: + - key: kb-data + operator: Equal + value: 'true' + effect: NoSchedule + componentSpecs: + - name: fe + componentDefRef: fe + serviceAccountName: kb-starrocks-cluster + replicas: 0 # Change this value + - name: be + componentDefRef: be + replicas: 0 # Change this value + ``` + + + + + + ```bash + kbcli cluster stop mycluster -n demo + ``` + + + + + +2. Check the status of the cluster to see whether it is stopped. + + + + + + ```bash + kubectl get cluster mycluster -n demo + ``` + + + + + + ```bash + kbcli cluster list mycluster -n demo + ``` + + + + + +### Start a cluster + +1. Configure the name of your cluster and run the command below to start this cluster. + + + + + + Apply an OpsRequest to start a cluster. + + ```bash + kubectl apply -f - < + + + + ```bash + kubectl edit cluster mycluster -n demo + ``` + + Change replicas back to the original amount to start this cluster again. 
+ + ```yaml + spec: + clusterDefinitionRef: starrocks-ce + clusterVersionRef: starrocks-ce-3.1.1 + terminationPolicy: Delete + affinity: + podAntiAffinity: Preferred + topologyKeys: + - kubernetes.io/hostname + tolerations: + - key: kb-data + operator: Equal + value: 'true' + effect: NoSchedule + componentSpecs: + - name: fe + componentDefRef: fe + serviceAccountName: kb-starrocks-cluster + replicas: 1 # Change this value + - name: be + componentDefRef: be + replicas: 2 # Change this value + ``` + + + + + + ```bash + kbcli cluster start mycluster -n demo + ``` + + + + + +2. Check the status of the cluster to see whether it is running again. + + + + + + ```bash + kubectl get cluster mycluster -n demo + ``` + + + + + + ```bash + kbcli cluster list mycluster -n demo + ``` + + + + diff --git a/docs/en/release-1_0_1/kubeblocks-for-starrocks/05-restart.mdx b/docs/en/release-1_0_1/kubeblocks-for-starrocks/05-restart.mdx new file mode 100644 index 00000000..6d520e71 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-starrocks/05-restart.mdx @@ -0,0 +1,77 @@ +--- +title: Restart +description: How to Restart StarRocks on KubeBlocks +keywords: [starrocks, analytic, data warehouse, control plane] +sidebar_position: 5 +sidebar_label: Restart +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Restart + + + + + +1. Restart a cluster. + + ```bash + kubectl apply -f - < + + + +1. Configure the values of `components` and `ttlSecondsAfterSucceed` and run the command below to restart a specified cluster. + + ```bash + kbcli cluster restart mycluster -n demo --components="starrocks" --ttlSecondsAfterSucceed=30 + ``` + + - `components` describes the component name that needs to be restarted. + - `ttlSecondsAfterSucceed` describes the time to live of an OpsRequest job after the restarting succeeds. + +2. Validate the restarting. + + Run the command below to check the cluster status to check the restarting status. 
+ + ```bash + kbcli cluster list mycluster -n demo + > + NAME NAMESPACE CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS CREATED-TIME + mycluster demo starrocks starrocks-3.1.1 Delete Running Jul 17,2024 19:06 UTC+0800 + ``` + + * STATUS=Updating: it means the cluster restart is in progress. + * STATUS=Running: it means the cluster has been restarted. + + + + diff --git a/docs/en/release-1_0_1/kubeblocks-for-starrocks/06-expand-volume.mdx b/docs/en/release-1_0_1/kubeblocks-for-starrocks/06-expand-volume.mdx new file mode 100644 index 00000000..0ab178e1 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-starrocks/06-expand-volume.mdx @@ -0,0 +1,165 @@ +--- +title: Expand Volume +description: How to Expand Volume for StarRocks on KubeBlocks +keywords: [starrocks, analytic, data warehouse, control plane] +sidebar_position: 6 +sidebar_label: Expand Volume +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Volume expansion + +### Before you start + +Check whether the cluster status is `Running`. Otherwise, the following operations may fail. + + + + + +```bash +kubectl get cluster mycluster -n demo +> +NAME CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS AGE +mycluster starrocks starrocks-3.1.1 Delete Running 4m29s +``` + + + + + +```bash +kbcli cluster list mycluster -n demo +> +NAME NAMESPACE CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS CREATED-TIME +mycluster demo starrocks starrocks-3.1.1 Delete Running Jul 17,2024 19:06 UTC+0800 +``` + + + + + +### Steps + + + + + +1. Change the value of storage according to your need and run the command below to expand the volume of a cluster. + + ```yaml + kubectl apply -f - < + NAMESPACE NAME TYPE CLUSTER STATUS PROGRESS AGE + demo mycluster-volume-expansion VolumeExpansion mycluster Succeed 3/3 6m + ``` + + If an error occurs, you can troubleshoot with `kubectl describe ops -n demo` command to view the events of this operation. + +3. 
Check whether the corresponding cluster resources change. + + ```bash + kubectl describe cluster mycluster -n demo + ``` + + + + + +1. Change the value of `spec.componentSpecs.volumeClaimTemplates.spec.resources` in the cluster YAML file. + + `spec.componentSpecs.volumeClaimTemplates.spec.resources` is the storage resource information of the pod and changing this value triggers the volume expansion of a cluster. + + ```bash + kubectl edit cluster mycluster -n demo + ``` + + Edit the values of `spec.componentSpecs.volumeClaimTemplates.spec.resources`. + + ```yaml + ... + spec: + clusterDefinitionRef: starrocks-ce + clusterVersionRef: starrocks-ce-3.1.1 + componentSpecs: + - name: be + componentDefRef: be + volumeClaimTemplates: + - name: be-storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 40Gi # Change the volume storage size + ... + ``` + +2. Check whether the corresponding cluster resources change. + + ```bash + kubectl describe cluster mycluster -n demo + ``` + + + + + +1. Set the `--storage` value according to your need and run the command to expand the volume. + + ```bash + kbcli cluster volume-expand mycluster -n demo --storage=40Gi --components=be + ``` + + The volume expansion may take a few minutes. + +2. Validate the volume expansion operation. + + - View the OpsRequest progress. + + KubeBlocks outputs a command automatically for you to view the details of the OpsRequest progress. The output includes the status of this OpsRequest and PVC. When the status is `Succeed`, this OpsRequest is completed. + + ```bash + kbcli cluster describe-ops mycluster-volumeexpansion-5pbd2 -n demo + ``` + + - View the cluster status. + + ```bash + kbcli cluster list mycluster -n demo + > + NAME NAMESPACE CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS CREATED-TIME + mycluster demo starrocks starrocks-3.1.1 Delete Running Jul 17,2024 19:06 UTC+0800 + ``` + +3. 
After the OpsRequest status is `Succeed` or the cluster status is `Running` again, check whether the corresponding resources change. + + ```bash + kbcli cluster describe mycluster -n demo + ``` + + + + \ No newline at end of file diff --git a/docs/en/release-1_0_1/kubeblocks-for-starrocks/10-delete.mdx b/docs/en/release-1_0_1/kubeblocks-for-starrocks/10-delete.mdx new file mode 100644 index 00000000..06ed2dfc --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-starrocks/10-delete.mdx @@ -0,0 +1,82 @@ +--- +title: Delete +description: How toDelete StarRocks on KubeBlocks +keywords: [starrocks, analytic, data warehouse, control plane] +sidebar_position: 10 +sidebar_label: Delete +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Delete a cluster + +### Termination policy + +:::note + +The termination policy determines how a cluster is deleted. + +::: + +| **terminationPolicy** | **Deleting Operation** | +|:----------------------|:-------------------------------------------------| +| `DoNotTerminate` | `DoNotTerminate` prevents deletion of the Cluster. This policy ensures that all resources remain intact. | +| `Delete` | `Delete` deletes Cluster resources like Pods, Services, and Persistent Volume Claims (PVCs), leading to a thorough cleanup while removing all persistent data. | +| `WipeOut` | `WipeOut` is an aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. | + +To check the termination policy, execute the following command. 
+ + + + + +```bash +kubectl get cluster mycluster -n demo +> +NAME CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS AGE +mycluster starrocks starrocks-3.1.1 Delete Running 34m +``` + + + + + +```bash +kbcli cluster list mycluster -n demo +> +NAME NAMESPACE CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS CREATED-TIME +mycluster demo Delete Running Sep 30,2024 13:03 UTC+0800 +``` + + + + + +### Steps + +Run the command below to delete a specified cluster. + + + + + +If you want to delete a cluster and its all related resources, you can modify the termination policy to `WipeOut`, then delete the cluster. + +```bash +kubectl patch -n demo cluster mycluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" + +kubectl delete -n demo cluster mycluster +``` + + + + + +```bash +kbcli cluster delete mycluster -n demo +``` + + + + diff --git a/docs/en/release-1_0_1/kubeblocks-for-starrocks/_category_.yml b/docs/en/release-1_0_1/kubeblocks-for-starrocks/_category_.yml new file mode 100644 index 00000000..557e6f62 --- /dev/null +++ b/docs/en/release-1_0_1/kubeblocks-for-starrocks/_category_.yml @@ -0,0 +1,4 @@ +position: 21 +label: KubeBlocks for StarRocks +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/release_notes/kbcli_template.mdx b/docs/en/release-1_0_1/release_notes/kbcli_template.mdx new file mode 100644 index 00000000..e48d0530 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/kbcli_template.mdx @@ -0,0 +1,3 @@ +# KBCLI $kbcli_version ($today) + +## sha256 sumcheck diff --git a/docs/en/release-1_0_1/release_notes/template.mdx b/docs/en/release-1_0_1/release_notes/template.mdx new file mode 100644 index 00000000..4c3de587 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/template.mdx @@ -0,0 +1,192 @@ +# KubeBlocks $kubeblocks_version ($today) + +We're happy to announce the release of KubeBlocks $kubeblocks_version! 
🚀 🎉 🎈 + +This release introduces Redis, a key-value database, and MongoDB, a document-based database. It also supports the primary-secondary topology of PostgreSQL, adapts to more public cloud vendors' hosted Kubernetes versions, improves data backup and recovery experiences, and builds basic data migration capability. We noticed that some users may think that K8s reduces database performance. So in this release we include a comparison test result to explain the throughput and RT differences of various MySQL 8.0 deployment forms on AWS. + +We would like to extend our appreciation to all contributors who helped make this release happen. + +## **Highlights** + +- KubeBlocks supports the primary-secondary topology of PostgreSQL + Users can actively switch the primary-secondary role of the database cluster with kbcli, or passively trigger failover by deleting a specified Kubernetes pod with kubectl. Failover generally completes within 30 seconds when there are no long transactions and large table DDLs. +- KubeBlocks supports Redis v7.0 + Redis is currently the most popular open-source key-value database, supporting data types such as key-value, string, list, set, hash table, and ordered set. It provides extremely fast data read and write operations and is suitable for cache scenarios in e-commerce, social communication, game, and other internet applications. To provide stable, secure, and efficient Redis services to users, KubeBlocks has adopted Redis 7.0 version, which is currently recommended officially, supporting standalone and primary-secondary topologies. Thus, users can perform operations such as creating, deleting, scaling, backing up, restoring, monitoring, alerting, and changing parameters of Redis clusters in development, testing, and production environments. +- KubeBlocks supports MongoDB v5.0 + MongoDB is currently the most popular document-based database, using JSON data types and dynamic schema designs to maintain high flexibility and scalability. 
KubeBlocks supports the replica set topology of MongoDB v5.0, providing data redundancy and automatic failover capabilities, ensuring data availability and consistency in the event of a node failure. The replica set topology cluster has one primary node (Primary) and several secondary nodes (Secondary), with the primary node handling all write requests and the secondary nodes handling some read requests. If the primary node fails, one of the secondary nodes is elected as the new primary node. +- KubeBlocks supports the private deployment of ChatGPT retrieval plugin + For users who do not want to expose sensitive information (such as company documents, meeting minutes, emails), OpenAI has open-sourced the ChatGPT retrieval plugin to enhance the ChatGPT experience. As long as users meet OpenAI's requirements, they can run the ChatGPT retrieval plugin through KubeBlocks addon, store the vectorized data of sensitive information in a private database, and enable ChatGPT to have longer memory of the context while ensuring information security. +- KubeBlocks supports one-command launching of playgrounds on Alibaba Cloud, Tencent Cloud, and GCP + Public cloud vendors' hosted Kubernetes services have significant differences in version, functionality, and integration, so even if the deployment of stateful services is not difficult, but Kubernetes administrators have to do a lot of extra heavy lifting to run stateful services normally. After supporting AWS, KubeBlocks provides the ability to one-command launch playgrounds on Alibaba Cloud, Tencent Cloud, and GCP. Users only need to set up public cloud AK locally, and then execute the kbcli playground init command, and KubeBlocks will automatically apply for resources and configure permissions in the specified region, making it easy for users to experience complete functionality. After trying KubeBlocks out, you can clean up the playground environment with one command to avoid incurring costs. 
+ +## **Breaking changes** + +- Breaking changes between v0.5 and v0.4. Uninstall v0.4 (including any older version) before installing v0.5. + - Move the backupPolicyTemplate API from dataprotection group to apps group. + Before installing v0.5, please ensure that the resources have been cleaned up: + ``` + kubectl delete backuppolicytemplates.dataprotection.kubeblocks.io --all + kubectl delete backuppolicies.dataprotection.kubeblocks.io --all + ``` + - redefines the phase of cluster and component. + Before installing v0.5, please ensure that the resources have been cleaned up: + ``` + kubectl delete clusters.apps.kubeblocks.io --all + kubectl delete opsrequets.apps.kubeblocks.io --all + ``` +- `addons.extensions.kubeblocks.io` API deleted `spec.helm.valuesMapping.jsonMap.additionalProperties`, `spec.helm.valuesMapping.valueMap.additionalProperties`, `spec.helm.valuesMapping.extras.jsonMap.additionalProperties` and `spec.helm.valuesMapping.extras.valueMap.additionalProperties` attributes that was introduced by CRD generator, all existing Addons API YAML shouldn't have referenced these attributes. + + +## **Known issues and limitations** +* Limitations of cluster's horizontal scale operation: + * Only support VolumeSnapshot API to make a clone of Cluster's PV for syncing data when horizontal scaling. + * Only 1st pod container and 1st volume mount associated PV will be processed for VolumeSnapshot, do assure that data volume is placed in 1st pod container's 1st volume mount. + * Unused PVCs will be deleted in 30 minutes after scale in. + +If you're new to KubeBlocks, visit the [getting started](https://github.com/apecloud/kubeblocks/blob/v$kubeblocks_version/docs/user_docs/quick_start_guide.md) page and get a quick start with KubeBlocks. + +$warnings + +See [this](#upgrading-to-kubeblocks-$kubeblocks_version) section to upgrade KubeBlocks to version $kubeblocks_version. + +## Acknowledgements + +Thanks to everyone who made this release possible! 
+
+$kubeblocks_contributors
+
+## What's Changed
+### New Features
+#### MySQL
+- Support ZEngine storage engine
+- Account management supports creating, modifying, and deleting database accounts with different permissions
+
+#### PostgreSQL
+- Support migration from AWS RDS to KubeBlocks, supporting pre-checks, full migration, and incremental synchronization, verifying the data migration capabilities of CadenceWorkflow and OpenStreetMap
+- Support for pgvector extension
+- Support for the primary-secondary topology of PostgreSQL
+- Automated failover and self-healing
+- Support point-in-time recovery
+- Account management supports creating, modifying, and deleting database accounts with different permissions
+
+#### Redis
+- Full lifecycle management, including creation, deletion, restart, horizontal/vertical scaling
+- Support Redis primary-secondary topology
+- Automated failover and self-healing
+- Support snapshot backup and recovery
+- Metric monitoring, including cluster's basic operation status, connection, OS resources, performance, primary-secondary replication status and other metrics
+- Alerts including cluster downtime, OS resource, abnormal connection number, primary-secondary replication abnormality, etc.
+- Parameter reconfigure
+- Account management
+
+#### MongoDB
+- Full lifecycle management, including creation, deletion, restart, vertical scaling, and disk expansion
+- Endpoint exposes the access addresses of all nodes
+- File-based full backup and recovery
+- Automated failover and self-healing
+- Monitoring, alerting and logs
+- Parameter reconfigure
+
+$kubeblocks_changes
+
+### Ease of Use
+- `kbcli playground` supports one-command launching on running environments of Alibaba Cloud, Tencent Cloud, and GCP to experience complete KubeBlocks functionality
+- kbcli supports creating clusters by entering CPU, memory, or class type
+- kbcli supports tagging related resources of cluster
+- kbcli is compatible with macOS package manager `brew`
+- kbcli supports `preflight` command to check whether the environment meets the requirements for installing KubeBlocks
+- kbcli adds object storage addon for storing full file backups, logs, and other data
+- `kbcli install` runs preflight to check whether the environment meets the requirements, including node taints, storage class, and other check rules
+- kbcli addon adds timeout parameter, printing exception information when enable fails
+- Addon inherits the affinity and tolerations configuration of KubeBlocks
+- `kbcli uninstall` prompts information to delete backup files, printing log information if the deletion fails
+- ClusterDefinition API `spec.connectionCredential` add following built-in variables:
+  - Headless service FQDN `$(HEADLESS_SVC_FQDN)` placeholder, value pattern - `$(CLUSTER_NAME)-$(1ST_COMP_NAME)-headless.$(NAMESPACE).svc`, where 1ST_COMP_NAME is the 1st component that provide `ClusterDefinition.spec.componentDefs[].service` attribute
+
+#### Compatibility
+- Compatible with AWS EKS v1.22/v1.23/v1.24/v1.25
+- Compatible with Alibaba Cloud ACK v1.22/v1.24
+- Compatible with Tencent Cloud TKE standard cluster v1.22/v1.24
+- Compatible with GCP GKE standard cluster v1.24/v1.25
+
+#### Stability
+- KubeBlocks limits the combination of CPU and memory to avoid unreasonable configurations that reduce resource utilization or system stability
+
+#### Performance
+- High-availability MySQL 8.0 with 4C 8GB 500GB, throughput and RT differences of various products on AWS, including ApeCloud MySQL Raft group, AWS RDS operator, Operator for Percona Server for MySQL, Oracle MySQL Operator for Kubernetes
+
+### API changes
+- New APIs:
+  - backuppolicytemplates.apps.kubeblocks.io
+
+- Deleted APIs:
+  - backuppolicytemplates.dataprotection.kubeblocks.io
+
+- New API attributes:
+  - clusterdefinitions.apps.kubeblocks.io API
+    - spec.type
+    - spec.componentDefs.customLabelSpecs
+  - clusterversions.apps.kubeblocks.io API
+    - spec.componentVersions.clientImage (EXPERIMENTAL)
+  - clusters.apps.kubeblocks.io API
+    - spec.componentSpecs.classDefRef
+    - spec.componentSpecs.serviceAccountName
+  - configconstraints.apps.kubeblocks.io API
+    - spec.reloadOptions.shellTrigger.namespace
+    - spec.reloadOptions.shellTrigger.scriptConfigMapRef
+    - spec.reloadOptions.tplScriptTrigger.sync
+    - spec.selector
+  - opsrequests.apps.kubeblocks.io API
+    - spec.restoreFrom
+    - spec.verticalScaling.class
+    - status.reconfiguringStatus.configurationStatus.updatePolicy
+  - backuppolicies.dataprotection.kubeblocks.io API
+    - spec.full
+    - spec.logfile
+    - spec.retention
+  - backups.dataprotection.kubeblocks.io
+    - status.manifests
+  - backuptools.dataprotection.kubeblocks.io
+    - spec.type
+
+- Renamed API attributes:
+  - clusterdefinitions.apps.kubeblocks.io API
+    - spec.componentDefs.horizontalScalePolicy.backupTemplateSelector -> spec.componentDefs.horizontalScalePolicy.backupPolicyTemplateName
+    - spec.componentDefs.probe.roleChangedProbe -> spec.componentDefs.probe.roleProbe
+  - backuppolicies.dataprotection.kubeblocks.io API
+    - spec.full
+  - restorejobs.dataprotection.kubeblocks.io API
+    - spec.target.secret.passwordKeyword -> spec.target.secret.passwordKey
+    - spec.target.secret.userKeyword -> spec.target.secret.usernameKey
+  - addons.extensions.kubeblocks.io API
+    - spec.helm.installValues.secretsRefs -> spec.helm.installValues.secretRefs
+
+- Deleted API attributes:
+  - opsrequests.apps.kubeblocks.io API
+    - status.observedGeneration
+  - backuppolicies.dataprotection.kubeblocks.io API
+    - spec.backupPolicyTemplateName
+    - spec.backupToolName
+    - spec.backupType
+    - spec.backupsHistoryLimit
+    - spec.hooks
+    - spec.incremental
+  - backups.dataprotection.kubeblocks.io API
+    - spec.ttl
+    - status.CheckPoint
+    - status.checkSum
+  - addons.extensions.kubeblocks.io API
+    - spec.helm.valuesMapping.jsonMap.additionalProperties
+    - spec.helm.valuesMapping.valueMap.additionalProperties
+    - spec.helm.valuesMapping.extras.jsonMap.additionalProperties
+    - spec.helm.valuesMapping.extras.valueMap.additionalProperties
+
+- Updates API Status info:
+  - clusters.apps.kubeblocks.io API
+    - status.components.phase valid values are Running, Stopped, Failed, Abnormal, Creating, Updating; REMOVED phases are SpecUpdating, Deleting, Deleted, VolumeExpanding, Reconfiguring, HorizontalScaling, VerticalScaling, VersionUpgrading, Rebooting, Stopping, Starting.
+    - status.phase valid values are Running, Stopped, Failed, Abnormal, Creating, Updating; REMOVED phases are ConditionsError, SpecUpdating, Deleting, Deleted, VolumeExpanding, Reconfiguring, HorizontalScaling, VerticalScaling, VersionUpgrading, Rebooting, Stopping, Starting.
+  - opsrequests.apps.kubeblocks.io API
+    - status.components.phase valid values are Running, Stopped, Failed, Abnormal, Creating, Updating; REMOVED phases are SpecUpdating, Deleting, Deleted, VolumeExpanding, Reconfiguring, HorizontalScaling, VerticalScaling, VersionUpgrading, Rebooting, Stopping, Starting, Exposing.
+    - status.phase added 'Creating' phase.
+
+## Upgrading to KubeBlocks $kubeblocks_version
+- N/A if upgrading from 0.4 or older version.
diff --git a/docs/en/release-1_0_1/release_notes/v0.1.0/_category_.yml b/docs/en/release-1_0_1/release_notes/v0.1.0/_category_.yml new file mode 100644 index 00000000..3e3e5d5a --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.1.0/_category_.yml @@ -0,0 +1,4 @@ +position: 9 +label: v0.1.0 +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/release_notes/v0.1.0/template.mdx b/docs/en/release-1_0_1/release_notes/v0.1.0/template.mdx new file mode 100644 index 00000000..cab736d9 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.1.0/template.mdx @@ -0,0 +1,168 @@ +# KubeBlocks $kubeblocks_version ($today) + +We're happy to announce the release of KubeBlocks $kubeblocks_version! 🚀 🎉 🎈 + +We would like to extend our appreciation to all contributors who helped make this release happen. + +**Breaking changes** +* Reconstructed existing "dbaas.kubeblocks.io" API group to new "apps.kubeblocks.io" API group, affected following APIs: + - ClusterDefinition + - ClusterVersion + - Cluster + - ConfigConstraint + - OpsRequest +* Refactored ConfigTemplate related API, affected following APIs: + - ClusterDefinition + - ClusterVersion + +* Existing APIs will no longer be functional, please make sure you have removed the deprecated APIs and transformed CRDs before upgrade. Please refer to the upgrade notes under this release notes. + +**Highlights** + * Automatic pod container environment variables updates: + * [NEW] KB_POD_FQDN - KubeBlocks Cluster component workload associated headless service name, N/A if workloadType=Stateless. 
+ * [NEW] KB_POD_IP - Pod IP address + * [NEW] KB_POD_IPS - Pod IP addresses + * [NEW] KB_HOST_IP - Host IP address + * [DEPRECATED] KB_PODIPS - Pod IP addresses + * [DEPRECATED] KB_PODIP - Pod IP address + * [DEPRECATED] KB_HOSTIP - Host IP address + * KB_POD_NAME - Pod Name + * KB_NAMESPACE - Namespace + * KB_SA_NAME - Service Account Name + * KB_NODENAME - Node Name + * KB_CLUSTER_NAME - KubeBlocks Cluster API object name + * KB_COMP_NAME - Running pod's KubeBlocks Cluster API object's `.spec.components.name` + * KB_CLUSTER_COMP_NAME - Running pod's KubeBlocks Cluster API object's `<.metadata.name>-<.spec.components.name>`, same name is used for Deployment or StatefulSet workload name, and Service object name + * New KubeBlocks addon extensions management (an addon components are part of KubeBlocks control plane extensions). Highlights include: + * New addons.extensions.kubeblocks.io API that provide running cluster installable check and auto-installation settings. + * Following addons are provided: + * Prometheus and Alertmanager + * AlertManager Webhook Adaptor + * Grafana + * KubeBlocks CSI driver + * S3 CSI driver + * Snapshot Controller + * KubeBlocks private network Load Balancer + * ApeCloud MySQL ClusterDefinition API + * Community PostgreSQL ClusterDefinition API + * Community Redis ClusterDefinition API + * Cluster availability demo application named NyanCat + * ClusterDefinition API `spec.connectionCredential` add following built-in variables: + * A random UUID v4 generator `$(UUID)` + * A random UUID v4 generator with BASE64 encoded `$(UUID_B64)` + * A random UUID v4 generator in UUID string then BASE64 encoded `$(UUID_STR_B64)` + * A random UUID v4 generator in HEX representation `$(UUID_HEX)` + * Service FQDN `$(SVC_FQDN)` placeholder, value pattern - $(CLUSTER_NAME)-$(1ST_COMP_NAME).$(NAMESPACE).svc, where 1ST_COMP_NAME is the 1st component that provide `ClusterDefinition.spec.componentDefs[].service` attribute + * Service ports `$(SVC_PORT_)` 
placeholder + * example usage: + + ```yaml + # ClusterDefinition API + kind: ClusterDefinition + metadata: + name: my-clusterdefinition + spec: + connectionCredential: + username: "admin" + "admin-password": "$(RANDOM_PASSWD)" + endpoint: "http://$(SVC_FQDN):$(SVC_PORT_http)" + + componentsDefs: + - name: my-comp-type + service: + ports: + - name: http + port: 8123 + + # Cluster API + kind: Cluster + metadata: + name: my-cluster + namespace: my-ns + spec: + clusterDefinitionRef: my-clusterdefinition + componentSpecs: + - name: my-comp + type: my-comp-type + + # output: + kind: Secret + metadata: + name: my-cluster-conn-credential + namespace: my-ns + labels: + "app.kubernetes.io/instance": my-cluster + stringData: + username: "admin" + admin-password: "" + endpoint: "http://my-cluster-my-comp.my-ns.svc:8123" + ``` + +**Known issues and limitations** + * Limitations of cluster's horizontal scale operation: + * Only support VolumeSnapshot API to make a clone of Cluster's PV for syncing data when horizontal scaling. + * Only 1st pod container and 1st volume mount associated PV will be processed for VolumeSnapshot, do assure that data volume is placed in 1st pod container's 1st volume mount. + * Unused PVCs will be deleted in 30 minutes after scale in. + +If you're new to KubeBlocks, visit the [getting started](https://github.com/apecloud/kubeblocks/blob/v$kubeblocks_version/docs/user_docs/quick_start_guide.md) page and get a quick start with KubeBlocks. + +$warnings + +See [this](#upgrading-to-kubeblocks-$kubeblocks_version) section to upgrade KubeBlocks to version $kubeblocks_version. + +## Acknowledgements + +Thanks to everyone who made this release possible! + +$kubeblocks_contributors + +## What's Changed +$kubeblocks_changes + +## Upgrading to KubeBlocks $kubeblocks_version + +To upgrade to this release of KubeBlocks, follow the steps here to ensure a smooth upgrade. 
+ +Release Notes for `v0.3.0`: +- Rename CRD name `backupjobs.dataprotection.kubeblocks.io` to `backups.dataprotection.kubeblocks.io` + - upgrade KubeBlocks with the following command: + ``` + helm upgrade --install kubeblocks kubeblocks/kubeblocks --version 0.3.0 + ``` + - after you upgrade KubeBlocks, check CRD `backupjobs.dataprotection.kubeblocks.io` and delete it + ``` + kubectl delete crd backupjobs.dataprotection.kubeblocks.io + ``` +- Rename CRD name `appversions.dbaas.kubeblocks.io` to `clusterversions.dbaas.kubeblocks.io` + - before you upgrade KubeBlocks, please backup your Cluster CR yaml first. + ``` + kubectl get cluster -oyaml > clusters.yaml + ``` + then replace all spec.appVersionRef to spec.clusterVersionRef in the clusters.yaml. + + Then, handle OpsRequest CR the same way. + - after you upgrade KubeBlocks, you can delete the CRD `appversions.dbaas.kubeblocks.io` + ``` + kubectl delete crd appversions.dbaas.kubeblocks.io + ``` + the last step, use the above backup of Clusters and OpsRequests to apply them. + ``` + kubectl apply -f clusters.yaml + ``` +- Rename group name `dbaas.kubeblocks.io` to `apps.kubeblocks.io` + - upgrade kubeblocks to create new CRDs, after that, you can delete the CRDs with group name`dbaas.kubeblocks.io` + +## Breaking Changes + +$kubeblocks_breaking_changes +* Refactored the use of labels. Existing clusters or config need to manually update their labels to ensure proper functionality. 
The following are specific changes: + - Pods of `statefulset` and `deployment` + - Replace label name from `app.kubernetes.io/component-name` to `apps.kubeblocks.io/component-name` + - Replace label name from `app.kubeblocks.io/workload-type` to `apps.kubeblocks.io/workload-type` + - Add label `app.kubernetes.io/version` with value `Cluster.Spec.ClusterVersionRef` + - Add label `app.kubernetes.io/component` with value `Cluster.Spec.ComponentSpecs.ComponentDefRef` + - CR `backuppolicytemplate` + - Replace label name from `app.kubernetes.io/created-by` to `app.kubernetes.io/managed-by` + - Configmap hosted by KubeBlocks and named with `*-env` suffix + - Replace label name from `app.kubernetes.io/config-type` to `apps.kubeblocks.io/config-type` +* Because the KubeBlocks Helm chart replaced the sub-chart dependency installation of its optional components with the Addons extensions API, upgrading from a previous version to this version will uninstall the optional components completely. diff --git a/docs/en/release-1_0_1/release_notes/v0.1.0/v0.1.0.mdx b/docs/en/release-1_0_1/release_notes/v0.1.0/v0.1.0.mdx new file mode 100644 index 00000000..c184417f --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.1.0/v0.1.0.mdx @@ -0,0 +1,188 @@ +--- +sidebar_label: v0.1.0 +--- + +# KubeBlocks 0.1.0 (2022-11-07) + +We're happy to announce the release of KubeBlocks 0.1.0, this is the initial release of KubeBlocks! 🚀 🎉 🎈 + +We would like to extend our thanks to all the new and existing contributors who helped make this release happen.
+ +**Highlights** + +* Support life cycle management of WeSQL(A highly available MySQL distribution), including create, delete, upgrade, vertical scaling, horizontal scaling, volume expansion, restart +* Support backup and restore +* Support configuration management +* Support floating ip based on elastic network interface and private ip +* Support Command Line Interface(CLI) dbctl, which can operate and maintain KubeBlocks and database cluster + +If you're new to KubeBlocks, visit the [getting started](https://kubeblocks.io) page and familiarize yourself with KubeBlocks. + +## Acknowledgements + +Thanks to everyone who made this release possible! + +@dengshaojiang, @free6om, @heng4fun, @iziang, @JashBook, @ldming, @lynnleelhl, @nashtsai, @sophon-zt, @wangyelei, @weicao, @yangmeilly, @yimeisun, @ZhaoDiankui + +## What's Changed + +### New Features +- apecloud#4 install&destroy wesql(mysql x-cluster) on localhost. ([#72](https://github.com/apecloud/kubeblocks/pull/72), @ZhaoDiankui) +- support backup/restore from snapshot ([#93](https://github.com/apecloud/kubeblocks/pull/93), @dengshaojiang) +- #4 Demo5, Add dbclt playground guide and fix clusterdefiniti… ([#96](https://github.com/apecloud/kubeblocks/pull/96), @ZhaoDiankui) +- remove useless code and tidy up code layout for dbctl ([#105](https://github.com/apecloud/kubeblocks/pull/105), @ldming) +- implement unified logic for the list command ([#136](https://github.com/apecloud/kubeblocks/pull/136), @ldming) +- update dbctl version to 0.5.0 ([#139](https://github.com/apecloud/kubeblocks/pull/139), @ldming) +- dbctl support unified describe logic ([#162](https://github.com/apecloud/kubeblocks/pull/162), @ldming) +- move dbctl code to internal package ([#169](https://github.com/apecloud/kubeblocks/pull/169), @ldming) +- add minikube-[start|delete] Makefile jobs ([#177](https://github.com/apecloud/kubeblocks/pull/177), @nashtsai) +- add more test cases for dbctl 
([#180](https://github.com/apecloud/kubeblocks/pull/180), @ldming) +- Support cluster affinity ([#187](https://github.com/apecloud/kubeblocks/pull/187), @heng4fun) +- dataprotection support wesql cluster. ([#188](https://github.com/apecloud/kubeblocks/pull/188), @dengshaojiang) +- refactor dbctl log and help ([#246](https://github.com/apecloud/kubeblocks/pull/246), @ldming) +- dbctl describe more cluster information ([#261](https://github.com/apecloud/kubeblocks/pull/261), @ldming) +- support exec and cluster connect command ([#280](https://github.com/apecloud/kubeblocks/pull/280), @ldming) +- dbctl support expose cluster ([#317](https://github.com/apecloud/kubeblocks/pull/317), @iziang) +- dbctl support describe network endpoints ([#324](https://github.com/apecloud/kubeblocks/pull/324), @iziang) +- add CI git-hooks-install make job ([#362](https://github.com/apecloud/kubeblocks/pull/362), @nashtsai) +- add pcregrep Chinese ([#363](https://github.com/apecloud/kubeblocks/pull/363), @JashBook) +- check branch name ([#389](https://github.com/apecloud/kubeblocks/pull/389), @JashBook) +- rename copyright and domain ([#390](https://github.com/apecloud/kubeblocks/pull/390), @ldming) +- remove chore ([#401](https://github.com/apecloud/kubeblocks/pull/401), @JashBook) +- ignore branch pattern main|master ([#409](https://github.com/apecloud/kubeblocks/pull/409), @JashBook) +- support debug controller and envtest ([#419](https://github.com/apecloud/kubeblocks/pull/419), @heng4fun) +- generate dbctl command reference doc ([#421](https://github.com/apecloud/kubeblocks/pull/421), @ldming) +- Hosting Helm Charts via GitHub Pages and Releases issue391 ([#422](https://github.com/apecloud/kubeblocks/pull/422), @yimeisun) +- issue link pr ([#444](https://github.com/apecloud/kubeblocks/pull/444), @JashBook) +- support go 1.19 ([#449](https://github.com/apecloud/kubeblocks/pull/449), @ldming) +- remove wesqlcluster helm and use new helm URL 
([#478](https://github.com/apecloud/kubeblocks/pull/478), @ldming) + +### Bug Fixes +- #79 fix failed test cases ([#80](https://github.com/apecloud/kubeblocks/pull/80), @ldming) +- fixed lint Make job error ([#86](https://github.com/apecloud/kubeblocks/pull/86), @nashtsai) +- update mysql operator to 2.0.6 and use our own helm repo. ([#91](https://github.com/apecloud/kubeblocks/pull/91), @ldming) +- remove jq from install_dbctl.sh ([#121](https://github.com/apecloud/kubeblocks/pull/121), @ldming) +- [deploy/helm] fixed admission webhook templates ([#146](https://github.com/apecloud/kubeblocks/pull/146), @nashtsai) +- [deploy/helm] fixed admission webhook TLS CA secret ([#166](https://github.com/apecloud/kubeblocks/pull/166), @nashtsai) +- fixed Makefile error ([#167](https://github.com/apecloud/kubeblocks/pull/167), @nashtsai) +- playground destroy error ([#195](https://github.com/apecloud/kubeblocks/pull/195), @ldming) +- [deploy/helm] fixed admission webhook genSignedCert with aux CN … ([#198](https://github.com/apecloud/kubeblocks/pull/198), @nashtsai) +- fixed pod admission webhook path settings ([#200](https://github.com/apecloud/kubeblocks/pull/200), @nashtsai) +- fix makefile for building container ([#207](https://github.com/apecloud/kubeblocks/pull/207), @ldming) +- playground guide does not match the number of replicas ([#229](https://github.com/apecloud/kubeblocks/pull/229), @ldming) +- set io streams for cluster delete command ([#288](https://github.com/apecloud/kubeblocks/pull/288), @ldming) +- fixed #325 ([#328](https://github.com/apecloud/kubeblocks/pull/328), @nashtsai) +- fixed 'manifest' make job not working ([#340](https://github.com/apecloud/kubeblocks/pull/340), @nashtsai) +- configmap setup failed (#307) ([#349](https://github.com/apecloud/kubeblocks/pull/349), @sophon-zt) +- optimize lint staticcheck ([#351](https://github.com/apecloud/kubeblocks/pull/351), @heng4fun) +- roleGroup in samples and helm charts removed 
([#367](https://github.com/apecloud/kubeblocks/pull/367), @free6om) +- show accessMode and name in cluster status ([#371](https://github.com/apecloud/kubeblocks/pull/371), @free6om) +- fix ops bug when cluster using consensussets ([#374](https://github.com/apecloud/kubeblocks/pull/374), @wangyelei) +- add keyword `omitempty` for optional field `topologyKeys` ([#375](https://github.com/apecloud/kubeblocks/pull/375), @heng4fun) +- [deploy/helm] fixed wrong template value ([#377](https://github.com/apecloud/kubeblocks/pull/377), @nashtsai) +- pod delete forbidden ([#382](https://github.com/apecloud/kubeblocks/pull/382), @free6om) +- check args and components when create cluster ([#384](https://github.com/apecloud/kubeblocks/pull/384), @ldming) +- probe container create event is forbidden ([#386](https://github.com/apecloud/kubeblocks/pull/386), @free6om) +- add missing service in wesql helm chart ([#387](https://github.com/apecloud/kubeblocks/pull/387), @free6om) +- fixed wrong probe image address; chore ([#398](https://github.com/apecloud/kubeblocks/pull/398), @nashtsai) +- ClusterDefitinition check depend on configuration templates (#378) ([#406](https://github.com/apecloud/kubeblocks/pull/406), @sophon-zt) +- when Cluster.terminationPolicy is WipeOut and deleted cluster, the PVC not deleted together ([#407](https://github.com/apecloud/kubeblocks/pull/407), @wangyelei) +- vscode dev container ([#417](https://github.com/apecloud/kubeblocks/pull/417), @heng4fun) +- make install missing backuppolicytemplate ([#432](https://github.com/apecloud/kubeblocks/pull/432), @dengshaojiang) +- endpoint controller test unstable failed ([#443](https://github.com/apecloud/kubeblocks/pull/443), @iziang) + +### Miscellaneous +- Develop branch to main ([#34](https://github.com/apecloud/kubeblocks/pull/34), @ldming) +- Merge develop to main ([#35](https://github.com/apecloud/kubeblocks/pull/35), @ldming) +- add CI/CD pipelines ([#38](https://github.com/apecloud/kubeblocks/pull/38), 
@JashBook) +- Feature/data protection ([#40](https://github.com/apecloud/kubeblocks/pull/40), @dengshaojiang) +- Update DesignDocTemplate.md ([#51](https://github.com/apecloud/kubeblocks/pull/51), @heng4fun) +- Add DatabaseClusterAffinityDesign.md ([#52](https://github.com/apecloud/kubeblocks/pull/52), @heng4fun) +- Fix typo ([#53](https://github.com/apecloud/kubeblocks/pull/53), @heng4fun) +- doc #56 add custom opsDefinition and opsRequest design doc ([#57](https://github.com/apecloud/kubeblocks/pull/57), @wangyelei) +- fix install_dbctl ([#59](https://github.com/apecloud/kubeblocks/pull/59), @JashBook) +- update dbctl version 0.4.0 ([#77](https://github.com/apecloud/kubeblocks/pull/77), @ldming) +- migrate KubeBlocks core driver operator ([#78](https://github.com/apecloud/kubeblocks/pull/78), @nashtsai) +- support appVersion, clusterDefinition and cluster CR validating webhook ([#83](https://github.com/apecloud/kubeblocks/pull/83), @wangyelei) +- fix ci-test and add badges ([#88](https://github.com/apecloud/kubeblocks/pull/88), @JashBook) +- Feature/unified dbcluster lifecycle ([#89](https://github.com/apecloud/kubeblocks/pull/89), @lynnleelhl) +- fix release publish ([#95](https://github.com/apecloud/kubeblocks/pull/95), @JashBook) +- Support/csi driver volume testing ([#99](https://github.com/apecloud/kubeblocks/pull/99), @nashtsai) +- Feature/unified dbcluster lifecycle ([#100](https://github.com/apecloud/kubeblocks/pull/100), @lynnleelhl) +- add Cluster Status handling. 
([#101](https://github.com/apecloud/kubeblocks/pull/101), @wangyelei) +- Refactor/container to podspec ([#102](https://github.com/apecloud/kubeblocks/pull/102), @lynnleelhl) +- CICD add staticcheck ([#106](https://github.com/apecloud/kubeblocks/pull/106), @JashBook) +- rm cicd github_token ([#109](https://github.com/apecloud/kubeblocks/pull/109), @JashBook) +- fix up golangci-lint linter errors ([#110](https://github.com/apecloud/kubeblocks/pull/110), @nashtsai) +- Merge remote-tracking branch 'origin/main' into bugfix/makefile-lint-… ([#112](https://github.com/apecloud/kubeblocks/pull/112), @nashtsai) +- Bugfix/fixded cuetool job ([#113](https://github.com/apecloud/kubeblocks/pull/113), @nashtsai) +- Bugfix/fixded cuetool job ([#114](https://github.com/apecloud/kubeblocks/pull/114), @nashtsai) +- fixed Cluster API controller List owned resources missing Nam… ([#116](https://github.com/apecloud/kubeblocks/pull/116), @nashtsai) +- Feature/vscode devcontainer ([#117](https://github.com/apecloud/kubeblocks/pull/117), @nashtsai) +- feat configure management (#36) ([#118](https://github.com/apecloud/kubeblocks/pull/118), @sophon-zt) +- update brew-install-prerequisite Makefile job, by fixed go com… ([#119](https://github.com/apecloud/kubeblocks/pull/119), @nashtsai) +- Feature/dbctl dbaas ([#122](https://github.com/apecloud/kubeblocks/pull/122), @lynnleelhl) +- add release version ([#133](https://github.com/apecloud/kubeblocks/pull/133), @JashBook) +- fix helm podspec error ([#143](https://github.com/apecloud/kubeblocks/pull/143), @lynnleelhl) +- fix #144 wesql helm chart can support single instance. ([#145](https://github.com/apecloud/kubeblocks/pull/145), @ZhaoDiankui) +- feature #153 dbctl support wesql instance customer-defined replicas. 
([#154](https://github.com/apecloud/kubeblocks/pull/154), @ldming) +- implement floating ip based on elastic network interface and private ip ([#159](https://github.com/apecloud/kubeblocks/pull/159), @iziang) +- Feature/dbctl guide ([#165](https://github.com/apecloud/kubeblocks/pull/165), @lynnleelhl) +- tidyup Makefile ([#183](https://github.com/apecloud/kubeblocks/pull/183), @nashtsai) +- Feature/consensus workload ([#194](https://github.com/apecloud/kubeblocks/pull/194), @free6om) +- fix dockerfile and refactor dbctl developer guide ([#199](https://github.com/apecloud/kubeblocks/pull/199), @ldming) +- Support OpsRequest controller, including the following 5 operations ([#202](https://github.com/apecloud/kubeblocks/pull/202), @wangyelei) +- Support/helm notes ([#203](https://github.com/apecloud/kubeblocks/pull/203), @nashtsai) +- update minikube-start Makefile job to check minikube cluster s… ([#209](https://github.com/apecloud/kubeblocks/pull/209), @nashtsai) +- make install missing volumesnapshot crd ([#212](https://github.com/apecloud/kubeblocks/pull/212), @dengshaojiang) +- Support/add license header checker ([#215](https://github.com/apecloud/kubeblocks/pull/215), @nashtsai) +- dbctl support cue template conversion when create a k8s resource ([#216](https://github.com/apecloud/kubeblocks/pull/216), @wangyelei) +- Remove unused field declaration from cue template ([#219](https://github.com/apecloud/kubeblocks/pull/219), @heng4fun) +- Inject prometheus exporter as sidecar ([#224](https://github.com/apecloud/kubeblocks/pull/224), @yimeisun) +- check dependency binary existence when exec make command ([#227](https://github.com/apecloud/kubeblocks/pull/227), @yimeisun) +- integrate simplified monitor stack to KubeBlocks helm chart ([#228](https://github.com/apecloud/kubeblocks/pull/228), @yimeisun) +- Improve reviewable checks ([#236](https://github.com/apecloud/kubeblocks/pull/236), @heng4fun) +- change PR label 
([#240](https://github.com/apecloud/kubeblocks/pull/240), @JashBook) +- integrate OpsRequest dbctl Cli and fix the ops bug ([#247](https://github.com/apecloud/kubeblocks/pull/247), @wangyelei) +- Patch clusterdefinition and appversion after uninstall dbaas ([#252](https://github.com/apecloud/kubeblocks/pull/252), @lynnleelhl) +- update internal/dbctl/util/util_test.go, with conditional tests due to extern HTTP service dependency ([#256](https://github.com/apecloud/kubeblocks/pull/256), @nashtsai) +- update apis/dbaas/v1alpha1/appversion_types.go, tidy up API doc and make optional struct field being pointer type ([#260](https://github.com/apecloud/kubeblocks/pull/260), @nashtsai) +- After the dbaas is reinstalled, the cluster fails to be created ([#271](https://github.com/apecloud/kubeblocks/pull/271), @wangyelei) +- update docker/Dockerfile, have maximize docker build cache processin ([#273](https://github.com/apecloud/kubeblocks/pull/273), @nashtsai) +- copy(dst, src) to supersede original array by using a new array in function checkAndUpdatePodVolumes ([#276](https://github.com/apecloud/kubeblocks/pull/276), @yangmeilly) +- cicd add cp controller-gen ([#277](https://github.com/apecloud/kubeblocks/pull/277), @JashBook) +- [deploy/helm] fixed missing labels ([#282](https://github.com/apecloud/kubeblocks/pull/282), @nashtsai) +- Feature/fix release cicd ([#285](https://github.com/apecloud/kubeblocks/pull/285), @JashBook) +- Feature/daprd merge ([#286](https://github.com/apecloud/kubeblocks/pull/286), @nashtsai) +- OpsRequest always in running phase bug ([#290](https://github.com/apecloud/kubeblocks/pull/290), @wangyelei) +- fix chart dependency ([#292](https://github.com/apecloud/kubeblocks/pull/292), @iziang) +- support dbctl version command. 
output dbctl version, KubeBlocks version and K8s version ([#294](https://github.com/apecloud/kubeblocks/pull/294), @wangyelei) +- Add help doc for API ([#295](https://github.com/apecloud/kubeblocks/pull/295), @lynnleelhl) +- rename appname to kubeblocks ([#296](https://github.com/apecloud/kubeblocks/pull/296), @ldming) +- make dbctl set flag separated with commas ([#297](https://github.com/apecloud/kubeblocks/pull/297), @lynnleelhl) +- monitor enable param in dbctl does not take effect when Cluster Component does not have monitor field ([#299](https://github.com/apecloud/kubeblocks/pull/299), @yimeisun) +- Bugfix/check volumesnapshotclass ([#301](https://github.com/apecloud/kubeblocks/pull/301), @nashtsai) +- optimize release cicd ([#302](https://github.com/apecloud/kubeblocks/pull/302), @JashBook) +- clusterdefinition webhook nil pointer bug ([#312](https://github.com/apecloud/kubeblocks/pull/312), @wangyelei) +- dbctl dbaas install support monitor param to enable monitor stack ([#327](https://github.com/apecloud/kubeblocks/pull/327), @yimeisun) +- tidy up API enum declarations ([#331](https://github.com/apecloud/kubeblocks/pull/331), @nashtsai) +- avoid make all outputs annoying messages when minikube is not … ([#334](https://github.com/apecloud/kubeblocks/pull/334), @weicao) +- tidy tests setup; ensure test objects cleanup before tests ([#336](https://github.com/apecloud/kubeblocks/pull/336), @nashtsai) +- rename chart/image version ([#338](https://github.com/apecloud/kubeblocks/pull/338), @JashBook) +- optimiza minikube-start make job ([#350](https://github.com/apecloud/kubeblocks/pull/350), @nashtsai) +- dbctl output helm notes after install ([#352](https://github.com/apecloud/kubeblocks/pull/352), @yimeisun) +- Add help docs for ConfigTemplateRefs API (#275) ([#353](https://github.com/apecloud/kubeblocks/pull/353), @sophon-zt) +- remove chinese docs temporarily ([#355](https://github.com/apecloud/kubeblocks/pull/355), @heng4fun) +- support anonymous login for 
grafana ([#357](https://github.com/apecloud/kubeblocks/pull/357), @yimeisun) +- add events notification when CR created, deleted ([#358](https://github.com/apecloud/kubeblocks/pull/358), @wangyelei) +- fix nil pointer crash ([#359](https://github.com/apecloud/kubeblocks/pull/359), @lynnleelhl) +- add cadvisor dashboard ([#361](https://github.com/apecloud/kubeblocks/pull/361), @yimeisun) +- Feature/optimize lb ([#380](https://github.com/apecloud/kubeblocks/pull/380), @iziang) +- format imports and add missing licenses ([#396](https://github.com/apecloud/kubeblocks/pull/396), @iziang) +- upgrade grafana helm chart version ([#410](https://github.com/apecloud/kubeblocks/pull/410), @yimeisun) +- add development guide ([#431](https://github.com/apecloud/kubeblocks/pull/431), @heng4fun) +- tidy up github localflows's actions ([#438](https://github.com/apecloud/kubeblocks/pull/438), @nashtsai) +- add contributing guide ([#456](https://github.com/apecloud/kubeblocks/pull/456), @heng4fun) +- have appversion and clusterdefinition controller with concurrent reconciling ([#458](https://github.com/apecloud/kubeblocks/pull/458), @nashtsai) +- Intermitten unit test error - OpsRequest webhook ([#461](https://github.com/apecloud/kubeblocks/pull/461), @wangyelei) +- remove deprecated 'app.kubernetes.io/created-by' object labels ([#467](https://github.com/apecloud/kubeblocks/pull/467), @nashtsai) \ No newline at end of file diff --git a/docs/en/release-1_0_1/release_notes/v0.2.0/_category_.yml b/docs/en/release-1_0_1/release_notes/v0.2.0/_category_.yml new file mode 100644 index 00000000..259777b5 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.2.0/_category_.yml @@ -0,0 +1,4 @@ +position: 8 +label: v0.2.0 +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/release_notes/v0.2.0/v0.2.0.mdx b/docs/en/release-1_0_1/release_notes/v0.2.0/v0.2.0.mdx new file mode 100644 index 00000000..24bd97a7 --- /dev/null +++ 
b/docs/en/release-1_0_1/release_notes/v0.2.0/v0.2.0.mdx @@ -0,0 +1,146 @@ +--- +sidebar_label: v0.2.0 +--- + +# KubeBlocks 0.2.0 (2022-12-30) + +We're happy to announce the release of KubeBlocks 0.2.0! 🚀 🎉 🎈 + +We would like to extend our thanks to all the new and existing contributors who helped make this release happen. + +**Highlights** + +* Support EXPERIMENTAL clickhouse, etcd and mongodb +* Automatic pod container environment variables: + * KB_POD_NAME - Pod Name + * KB_NAMESPACE - Namespace + * KB_SA_NAME - Service Account Name + * KB_NODENAME - Node Name + * KB_HOSTIP - Host IP address + * KB_PODIP - Pod IP address + * KB_PODIPS - Pod IP addresses + * KB_CLUSTER_NAME - KubeBlocks Cluster API object name + * KB_COMP_NAME - Running pod's KubeBlocks Cluster API object's `.spec.components.name` + * KB_CLUSTER_COMP_NAME - Running pod's KubeBlocks Cluster API object's `<.metadata.name>-<.spec.components.name>`, same name is used for Deployment or StatefulSet workload name, and Service object name +* ClusterDefinition API support following automatic variable names: + * under `.spec.connectionCredential`: + * random 8 characters `$(RANDOM_PASSWD)` placeholder, + * self reference map object `$(CONN_CREDENTIAL)[.])` + * example usage: + +```yaml +spec: + connectionCredential: + username: "admin-password" + password: "$(RANDOM_PASSWD)" + "$(CONN_CREDENTIAL).username": "$(CONN_CREDENTIAL).password" + +# output: +spec: + connectionCredential: + username: "admin-password" + password: "" + "admin-password": "" +``` + + * Connection credential secret name place holder `$(CONN_CREDENTIAL_SECRET_NAME)` + * CLI support `list-logs-type` and `logs` command to access cluster logs + * CLI support to list ops + +If you're new to KubeBlocks, visit the [getting started](https://kubeblocks.io) page and +familiarize yourself with KubeBlocks. + +## Acknowledgements + +Thanks to everyone who made this release possible! 
+ +@ahjing99, @dengshaojiang, @free6om, @heng4fun, @iziang, @JashBook, @ldming, @michelle-0808, @nashtsai, @TalktoCrystal, @wangyelei, @xuriwuyun, @yangmeilly, @yimeisun, @ZhaoDiankui + +## What's Changed + +### New Features +- support probe ([#234](https://github.com/apecloud/kubeblocks/pull/234), @xuriwuyun) +- support BestEffortLocal traffic policy ([#440](https://github.com/apecloud/kubeblocks/pull/440), @iziang) +- support cluster list-logs-type and logs cmd to access cluster logs ([#445](https://github.com/apecloud/kubeblocks/pull/445), @yangmeilly) +- using listen/watch instead of timing reconcile in ClusterController and OpsRequestController ([#459](https://github.com/apecloud/kubeblocks/pull/459), @wangyelei) +- probe improvement ([#465](https://github.com/apecloud/kubeblocks/pull/465), @xuriwuyun) +- have service and endpoint controller with concurrent reconciling ([#479](https://github.com/apecloud/kubeblocks/pull/479), @iziang) +- add label selector filter for service ([#508](https://github.com/apecloud/kubeblocks/pull/508), @iziang) +- upgrade kubeblocks wesql version to 8.0.30 ([#531](https://github.com/apecloud/kubeblocks/pull/531), @ZhaoDiankui) +- set cluster default termination policy to delete ([#535](https://github.com/apecloud/kubeblocks/pull/535), @ldming) +- support ops command with list/delete ([#549](https://github.com/apecloud/kubeblocks/pull/549), @wangyelei) +- upgrade grafana version ([#562](https://github.com/apecloud/kubeblocks/pull/562), @JashBook) +- rename dbctl dbaas subcommand to kubeblocks ([#601](https://github.com/apecloud/kubeblocks/pull/601), @ldming) +- make test in minikube ([#640](https://github.com/apecloud/kubeblocks/pull/640), @JashBook) +- dbctl cluster list enhancement ([#675](https://github.com/apecloud/kubeblocks/pull/675), @ldming) +- running&status probe support ([#695](https://github.com/apecloud/kubeblocks/pull/695), @xuriwuyun) +- clickhouse provider ([#708](https://github.com/apecloud/kubeblocks/pull/708), 
@nashtsai) +- rename dbctl to kbcli ([#729](https://github.com/apecloud/kubeblocks/pull/729), @ldming) +- cli support table printer and refactor layout, rename phase to status ([#732](https://github.com/apecloud/kubeblocks/pull/732), @ldming) +- create cluster require to specify cluster definition and version, auto-completion resource name ([#741](https://github.com/apecloud/kubeblocks/pull/741), @ldming) +- etcd & mongodb provider ([#764](https://github.com/apecloud/kubeblocks/pull/764), @free6om) + +### Bug Fixes +- dev container start failed when without minikube ([#471](https://github.com/apecloud/kubeblocks/pull/471), @heng4fun) +- remove helm login ([#487](https://github.com/apecloud/kubeblocks/pull/487), @ldming) +- only push tag trigger release ([#490](https://github.com/apecloud/kubeblocks/pull/490), @ldming) +- only start node manager on leader ([#503](https://github.com/apecloud/kubeblocks/pull/503), @iziang) +- delete pods when previous pods ready ([#540](https://github.com/apecloud/kubeblocks/pull/540), @free6om) +- describe error when cluster is updating ([#548](https://github.com/apecloud/kubeblocks/pull/548), @ldming) +- kubeblocks default version of wesql is 8.0.30 ([#553](https://github.com/apecloud/kubeblocks/pull/553), @ZhaoDiankui) +- update helm repo if exists to avoid install failure ([#565](https://github.com/apecloud/kubeblocks/pull/565), @ldming) +- cluster phase is incorrect when vertical scaling cluster and can not delete opsRequest when phase is Running ([#574](https://github.com/apecloud/kubeblocks/pull/574), @wangyelei) +- ops request deletion failed in succeed status and create cluster required terminationPolicy ([#581](https://github.com/apecloud/kubeblocks/pull/581), @wangyelei) +- when set AllowVolumeExpansion to true in EBS storageClass, the created cluster still does not support volume expansion ([#587](https://github.com/apecloud/kubeblocks/pull/587), @wangyelei) +- create cluster require to specify the termination policy 
([#596](https://github.com/apecloud/kubeblocks/pull/596), @ldming) +- support CheckErr to handle unexpected error ([#613](https://github.com/apecloud/kubeblocks/pull/613), @ldming) +- patch annotation instead of delete event ([#644](https://github.com/apecloud/kubeblocks/pull/644), @free6om) +- the ops cannot be deleted and webhook message is incorrect ([#677](https://github.com/apecloud/kubeblocks/pull/677), @wangyelei) +- fix describe format and support to display default storage class ([#698](https://github.com/apecloud/kubeblocks/pull/698), @ldming) +- event_controller unit test failed ([#701](https://github.com/apecloud/kubeblocks/pull/701), @free6om) +- panic when handling role changed event ([#711](https://github.com/apecloud/kubeblocks/pull/711), @free6om) +- when the cluster executed patch resources requests > limits, cluster is unavailable ([#731](https://github.com/apecloud/kubeblocks/pull/731), @wangyelei) +- for cluster status updating ([#761](https://github.com/apecloud/kubeblocks/pull/761), @xuriwuyun) +- update cluster controller tests for role changed event processing ([#779](https://github.com/apecloud/kubeblocks/pull/779), @xuriwuyun) +- update probe rolecheck timeout ([#796](https://github.com/apecloud/kubeblocks/pull/796), @xuriwuyun) +- update role abserve ([#830](https://github.com/apecloud/kubeblocks/pull/830), @xuriwuyun) +- role probe timeout problem #842 ([#863](https://github.com/apecloud/kubeblocks/pull/863), @xuriwuyun) + +### Miscellaneous +- fix intermittent error when make test ([#272](https://github.com/apecloud/kubeblocks/pull/272), @wangyelei) +- fix #278 rename wesql appversion.name, fix #279 rename wesql cd.name ([#300](https://github.com/apecloud/kubeblocks/pull/300), @ZhaoDiankui) +- rename wesql appversion.name, fix #279 ([#304](https://github.com/apecloud/kubeblocks/pull/304), @ZhaoDiankui) +- Support/consensus associated test cases ([#414](https://github.com/apecloud/kubeblocks/pull/414), @free6om) +- cli support generic 
delete logic ([#454](https://github.com/apecloud/kubeblocks/pull/454), @ldming) +- upgrade gh api ([#469](https://github.com/apecloud/kubeblocks/pull/469), @JashBook) +- add concurrent reconciles for dataprotection ([#472](https://github.com/apecloud/kubeblocks/pull/472), @dengshaojiang) +- support to generate release notes ([#473](https://github.com/apecloud/kubeblocks/pull/473), @ldming) +- CI add tag to stale issues ([#484](https://github.com/apecloud/kubeblocks/pull/484), @ahjing99) +- rename main helm chart directory to helm ([#512](https://github.com/apecloud/kubeblocks/pull/512), @yimeisun) +- optimize output after dbaas install ([#515](https://github.com/apecloud/kubeblocks/pull/515), @yimeisun) +- probe improvement ([#518](https://github.com/apecloud/kubeblocks/pull/518), @xuriwuyun) +- consensus role label update ([#522](https://github.com/apecloud/kubeblocks/pull/522), @free6om) +- check PR title ([#529](https://github.com/apecloud/kubeblocks/pull/529), @JashBook) +- speed up dbctl cluster list ([#542](https://github.com/apecloud/kubeblocks/pull/542), @ldming) +- add user docs ([#554](https://github.com/apecloud/kubeblocks/pull/554), @TalktoCrystal) +- Create user_doc.yml ([#560](https://github.com/apecloud/kubeblocks/pull/560), @ahjing99) +- change check branch name location ([#571](https://github.com/apecloud/kubeblocks/pull/571), @JashBook) +- enable WeSQL role changed probe ([#576](https://github.com/apecloud/kubeblocks/pull/576), @ldming) +- update WeSQL helm version ([#579](https://github.com/apecloud/kubeblocks/pull/579), @ldming) +- add-user-docs ([#604](https://github.com/apecloud/kubeblocks/pull/604), @michelle-0808) +- have no ST1006 (Poorly chosen receiver name) staticcheck removed ([#614](https://github.com/apecloud/kubeblocks/pull/614), @nashtsai) +- follow staticcheck ST1003 ([#616](https://github.com/apecloud/kubeblocks/pull/616), @iziang) +- update old wesql image ([#648](https://github.com/apecloud/kubeblocks/pull/648), @ldming) +- 
wesql cluster helm chart is back ([#673](https://github.com/apecloud/kubeblocks/pull/673), @ldming) +- upgrade wesql version ([#717](https://github.com/apecloud/kubeblocks/pull/717), @JashBook) +- add test case for dbctl and fix playground guide ([#718](https://github.com/apecloud/kubeblocks/pull/718), @ldming) +- adjust ci checkout ([#760](https://github.com/apecloud/kubeblocks/pull/760), @JashBook) +- update wesql chart version to 0.1.5 ([#767](https://github.com/apecloud/kubeblocks/pull/767), @ldming) +- fix ci install setup-envtest fail ([#778](https://github.com/apecloud/kubeblocks/pull/778), @JashBook) +- fix misspelling of words ([#818](https://github.com/apecloud/kubeblocks/pull/818), @JashBook) +- adjust the note that contains the subject ([#822](https://github.com/apecloud/kubeblocks/pull/822), @JashBook) +- add helm install/validate before uploading ([#825](https://github.com/apecloud/kubeblocks/pull/825), @JashBook) + +## Breaking Changes + +None. diff --git a/docs/en/release-1_0_1/release_notes/v0.3.0/_category_.yml b/docs/en/release-1_0_1/release_notes/v0.3.0/_category_.yml new file mode 100644 index 00000000..5c564ab7 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.3.0/_category_.yml @@ -0,0 +1,4 @@ +position: 7 +label: v0.3.0 +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.0.mdx b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.0.mdx new file mode 100644 index 00000000..f63a6edd --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.0.mdx @@ -0,0 +1,151 @@ +--- +sidebar_label: v0.3.0 +--- + +# KubeBlocks 0.3.0 (2023-01-19) + +We're happy to announce the release of KubeBlocks 0.3.0! 🚀 🎉 🎈 + +We would like to extend our appreciation to all contributors who helped make this release happen. + +**Highlights** + + * Horizontal scaling for ApeCloud MySQL, you can add/remove read-only instances. + * Replication lag metrics for ApeCloud MySQL. 
+ * Alerting rules for ApeCloud MySQL. + * Policy-based backups. + * Preflight checks to pre-check the deployment environment. + * Show connection example with kbcli connect enhancement + * List ClusterDefinition, ClusterVersion, and users with kbcli. + +If you're new to KubeBlocks, visit the [getting started](https://kubeblocks.io) page and get a quick start with KubeBlocks. + +> **Note: This release contains a few [breaking changes](#breaking-changes).** + +See [this](#upgrading-to-kubeblocks-0.3.0) section to upgrade KubeBlocks to version 0.3.0. + +## Acknowledgements + +Thanks to everyone who made this release possible! + +@dengshaojiang, @free6om, @heng4fun, @JashBook, @ldming, @lynnleelhl, @max8899, @michelle-0808, @nashtsai, @shanshanying, @sophon-zt, @wangyelei, @weicao, @xuriwuyun, @yangmeilly, @yimeisun, @ZhaoDiankui + +## What's Changed + +### New Features +- horizontal scaling ([#330](https://github.com/apecloud/kubeblocks/pull/330), @lynnleelhl) +- SystemAccount Creation on Demand ([#511](https://github.com/apecloud/kubeblocks/pull/511), @shanshanying) +- reconfigure configmap ([#567](https://github.com/apecloud/kubeblocks/pull/567), @sophon-zt) +- update cluster with kbcli ([#826](https://github.com/apecloud/kubeblocks/pull/826), @ldming) +- timeout seconds for probe ([#864](https://github.com/apecloud/kubeblocks/pull/864), @xuriwuyun) +- OpsRequest reconfigure ([#866](https://github.com/apecloud/kubeblocks/pull/866), @sophon-zt) +- create namespace when installing KubeBlocks ([#880](https://github.com/apecloud/kubeblocks/pull/880), @ldming) +- automatically move issue when released ([#895](https://github.com/apecloud/kubeblocks/pull/895), @JashBook) +- cli describe and list enhancement ([#896](https://github.com/apecloud/kubeblocks/pull/896), @ldming) +- OpsRequest progress/progressDetails and last configuration of operations ([#921](https://github.com/apecloud/kubeblocks/pull/921), @wangyelei) +- add troubleshoot.sh module and preflight checks for 
KubeBlocks ([#939](https://github.com/apecloud/kubeblocks/pull/939), @yangmeilly) +- auto-schedule for backup policy ([#950](https://github.com/apecloud/kubeblocks/pull/950), @dengshaojiang) +- show connection example with cli connect enhancement ([#951](https://github.com/apecloud/kubeblocks/pull/951), @ldming) +- ApeCloud MySQL consensus metrics and dashboards ([#952](https://github.com/apecloud/kubeblocks/pull/952), @yimeisun) +- upload kbcli to public repo ([#960](https://github.com/apecloud/kubeblocks/pull/960), @JashBook) +- alerting rules for MySQL and cadvisor ([#962](https://github.com/apecloud/kubeblocks/pull/962), @yimeisun) +- prometheus and alertmanager with persistent volume, resource limitation and replicas ([#963](https://github.com/apecloud/kubeblocks/pull/963), @yimeisun) +- update ApeCloud MySQL default config template ([#965](https://github.com/apecloud/kubeblocks/pull/965), @sophon-zt) +- list ClusterDefinition, ClusterVersion and users with kbcli ([#967](https://github.com/apecloud/kubeblocks/pull/967), @ldming) +- use dashboard command to simplify kbcli KubeBlocks output message ([#968](https://github.com/apecloud/kubeblocks/pull/968), @yimeisun) +- kbcli upgrade ([#977](https://github.com/apecloud/kubeblocks/pull/977), @lynnleelhl) +- add make manifests to ci ([#990](https://github.com/apecloud/kubeblocks/pull/990), @JashBook) +- support describe ops ([#995](https://github.com/apecloud/kubeblocks/pull/995), @wangyelei) +- enhance list ops and add double-check for operations ([#1000](https://github.com/apecloud/kubeblocks/pull/1000), @wangyelei) + +### Bug Fixes +- should stop installing helm chart when last release is not successfully deployed ([#856](https://github.com/apecloud/kubeblocks/pull/856), @max8899) +- when opsRequest is running and the spec clusterRef is updated by user, the OpsRequest jobs will inconsistent. 
([#879](https://github.com/apecloud/kubeblocks/pull/879), @wangyelei) +- remove omitempty to prevent empty value when marshal ([#883](https://github.com/apecloud/kubeblocks/pull/883), @lynnleelhl) +- update helm template ([#890](https://github.com/apecloud/kubeblocks/pull/890), @xuriwuyun) +- build pdb if minReplicas non-zero ([#905](https://github.com/apecloud/kubeblocks/pull/905), @lynnleelhl) +- use password in setup scripts ([#912](https://github.com/apecloud/kubeblocks/pull/912), @lynnleelhl) +- backup resources not clean after h-scale ([#914](https://github.com/apecloud/kubeblocks/pull/914), @lynnleelhl) +- optimize DoNotTerminate ([#917](https://github.com/apecloud/kubeblocks/pull/917), @lynnleelhl) +- fix bug of horizontal scaling and add UT for it #922 ([#923](https://github.com/apecloud/kubeblocks/pull/923), @weicao) +- updating pvc storage size throws error ([#926](https://github.com/apecloud/kubeblocks/pull/926), @weicao) +- add exec command in ApeCloud MySQL setup.sh script ([#936](https://github.com/apecloud/kubeblocks/pull/936), @ZhaoDiankui) +- make error for duplicate constant ([#944](https://github.com/apecloud/kubeblocks/pull/944), @ldming) +- modify kbcli and readme docs ([#947](https://github.com/apecloud/kubeblocks/pull/947), @michelle-0808) +- fixed 'goimports' Makefile job that tidy up vendor/ go files ([#953](https://github.com/apecloud/kubeblocks/pull/953), @nashtsai) +- probe event lost ([#964](https://github.com/apecloud/kubeblocks/pull/964), @xuriwuyun) +- delete OpsRequest failed when cluster is deleted and support sending events when probe timeout ([#973](https://github.com/apecloud/kubeblocks/pull/973), @wangyelei) +- do nothing if error is already exists ([#976](https://github.com/apecloud/kubeblocks/pull/976), @lynnleelhl) +- pod Annotations is none when changed the component monitor to true, and update any cluster.spec variables, the cluster maybe goes to Updating. 
([#979](https://github.com/apecloud/kubeblocks/pull/979), @wangyelei) +- check if KubeBlocks is installed when install KubeBlocks ([#981](https://github.com/apecloud/kubeblocks/pull/981), @ldming) +- make manifest leading to inconsistent crd yaml ([#987](https://github.com/apecloud/kubeblocks/pull/987), @shanshanying) +- update makefile for probe ([#992](https://github.com/apecloud/kubeblocks/pull/992), @xuriwuyun) +- component doc for consensusset ([#999](https://github.com/apecloud/kubeblocks/pull/999), @free6om) +- remove backuptools custom resources and definitions when uninstall kubeblocks ([#1008](https://github.com/apecloud/kubeblocks/pull/1008), @ldming) +- update ApeCloud MySQL config constraint (#1013) ([#1018](https://github.com/apecloud/kubeblocks/pull/1018), @sophon-zt) +- OS X Process.Name function requires elevated permissions (#1012) ([#1019](https://github.com/apecloud/kubeblocks/pull/1019), @sophon-zt) +- support k8sResource convert for cue validate ApeCloud MySQL config (#1020) ([#1036](https://github.com/apecloud/kubeblocks/pull/1036), @sophon-zt) +- delete unused parameters for config-manager-sidecar (#1031) ([#1037](https://github.com/apecloud/kubeblocks/pull/1037), @sophon-zt) +- remove configuration configMaps when uninstall KubeBlocks ([#1039](https://github.com/apecloud/kubeblocks/pull/1039), @ldming) +- update rbac to fix deletion hang ([#1051](https://github.com/apecloud/kubeblocks/pull/1051), @lynnleelhl) +- remove cli upgrade KubeBlocks default version ([#1069](https://github.com/apecloud/kubeblocks/pull/1069), @ldming) +- horizontal scaling bugfix ([#1070](https://github.com/apecloud/kubeblocks/pull/1070), @lynnleelhl) +- kbcli restore cluster nil pointer error. 
([#1072](https://github.com/apecloud/kubeblocks/pull/1072), @dengshaojiang) + +### Miscellaneous +- user docs ([#665](https://github.com/apecloud/kubeblocks/pull/665), @michelle-0808) +- refactor OpsRequest API ([#843](https://github.com/apecloud/kubeblocks/pull/843), @wangyelei) +- fix make manifests no effective ([#844](https://github.com/apecloud/kubeblocks/pull/844), @wangyelei) +- fix and tidy clusterdef controller test ([#851](https://github.com/apecloud/kubeblocks/pull/851), @weicao) +- refactor AppVersion API to ClusterVersion API ([#852](https://github.com/apecloud/kubeblocks/pull/852), @wangyelei) +- update to ginkgo/v2 package ([#867](https://github.com/apecloud/kubeblocks/pull/867), @nashtsai) +- add test cases for cue_value ([#887](https://github.com/apecloud/kubeblocks/pull/887), @weicao) +- add clusterversion_types_test ([#891](https://github.com/apecloud/kubeblocks/pull/891), @weicao) +- fix and tidy ut in cluster_controller_test ([#898](https://github.com/apecloud/kubeblocks/pull/898), @weicao) +- add some comments to code and improve variable naming ([#906](https://github.com/apecloud/kubeblocks/pull/906), @weicao) +- improve the consensus test, split it into an UT and a FT ([#930](https://github.com/apecloud/kubeblocks/pull/930), @weicao) +- use golang generics to avoid duplicated code ([#959](https://github.com/apecloud/kubeblocks/pull/959), @weicao) +- do some code tidy up in cluster controller while reading it ([#982](https://github.com/apecloud/kubeblocks/pull/982), @weicao) +- rename components.type name to MySQL and upgrade ApeCloud MySQL version ([#985](https://github.com/apecloud/kubeblocks/pull/985), @ZhaoDiankui) +- improve ut of systemaccount ([#993](https://github.com/apecloud/kubeblocks/pull/993), @weicao) +- fix the infinite loop in getAvailableContainerPorts ([#994](https://github.com/apecloud/kubeblocks/pull/994), @weicao) +- improve opsrequest controller ut ([#997](https://github.com/apecloud/kubeblocks/pull/997), @weicao) +- 
update probe API crd ([#1003](https://github.com/apecloud/kubeblocks/pull/1003), @xuriwuyun) +- change values image tag ([#1007](https://github.com/apecloud/kubeblocks/pull/1007), @JashBook) +- change values image tag ([#1010](https://github.com/apecloud/kubeblocks/pull/1010), @JashBook) +- add lib to ci ([#1034](https://github.com/apecloud/kubeblocks/pull/1034), @JashBook) +- support install and uninstall nyan cat demo application ([#1041](https://github.com/apecloud/kubeblocks/pull/1041), @heng4fun) + +## Upgrading to KubeBlocks 0.3.0 + +To upgrade to this release of KubeBlocks, follow the steps here to ensure a smooth upgrade. + +Release Notes for `v0.3.0`: +- Rename CRD name `backupjobs.dataprotection.kubeblocks.io` to `backups.dataprotection.kubeblocks.io` + - upgrade KubeBlocks with the following command: + ``` + helm upgrade --install kubeblocks kubeblocks/kubeblocks --version 0.3.0 + ``` + - after you upgrade KubeBlocks, check CRD `backupjobs.dataprotection.kubeblocks.io` and delete it + ``` + kubectl delete crd backupjobs.dataprotection.kubeblocks.io + ``` +- Rename CRD name `appversions.dbaas.kubeblocks.io` to `clusterversions.dbaas.kubeblocks.io` + - before you upgrade KubeBlocks, please backup your Cluster CR yaml first. + ``` + kubectl get cluster -oyaml > clusters.yaml + ``` + then replace all spec.appVersionRef to spec.clusterVersionRef in the clusters.yaml. + + Then, handle OpsRequest CR the same way. + - after you upgrade KubeBlocks, you can delete the CRD `appversions.dbaas.kubeblocks.io` + ``` + kubectl delete crd appversions.dbaas.kubeblocks.io + ``` + the last step, use the above backup of Clusters and OpsRequests to apply them. 
+ ``` + kubectl apply -f clusters.yaml + ``` +## Breaking Changes + +- auto-schedule for backup policy ([#950](https://github.com/apecloud/kubeblocks/pull/950), @dengshaojiang) diff --git a/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.1.mdx b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.1.mdx new file mode 100644 index 00000000..cdb7f8e4 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.1.mdx @@ -0,0 +1,10 @@ +--- +sidebar_label: v0.3.1 +--- + +# KubeBlocks 0.3.1 (2023-01-28) + +## What's Changed + +### Bug Fixes +- rename clusterversion.name wesql to apecloud-mysql ([#1095](https://github.com/apecloud/kubeblocks/pull/1095), @ZhaoDiankui) diff --git a/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.2.mdx b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.2.mdx new file mode 100644 index 00000000..ec5dd0ec --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.2.mdx @@ -0,0 +1,16 @@ +--- +sidebar_label: v0.3.2 +--- + +# KubeBlocks 0.3.2 (2023-01-30) + +## What's Changed + +### Bug Fixes +- OS X Process.Name function requires elevated permissions ([#1019](https://github.com/apecloud/kubeblocks/pull/1019), @sophon-zt) +- remove cached entry from SecretMap ([#1044](https://github.com/apecloud/kubeblocks/pull/1044), @shanshanying) +- pvc bound check wrong ([#1084](https://github.com/apecloud/kubeblocks/pull/1084), @lynnleelhl) +- check existence of address before retrieving IP ([#1094](https://github.com/apecloud/kubeblocks/pull/1094), @shanshanying) +- systemaccount controller doesn't set secret's owner when it refers a existing one ([#1098](https://github.com/apecloud/kubeblocks/pull/1098), @weicao) +- kbcli snapshot-controller.enabled supported in minikube ([#1124](https://github.com/apecloud/kubeblocks/pull/1124), @dengshaojiang) +- improve setup scripts to tolerate timeout error ([#1125](https://github.com/apecloud/kubeblocks/pull/1125), @lynnleelhl) \ No newline at end of file diff --git 
a/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.3.mdx b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.3.mdx new file mode 100644 index 00000000..7d19c7bc --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.3.mdx @@ -0,0 +1,10 @@ +--- +sidebar_label: v0.3.3 +--- + +# KubeBlocks 0.3.3 (2023-01-31) + +## What's Changed + +### Bug Fixes +- external resource leaks when delete cluster ([#1056](https://github.com/apecloud/kubeblocks/pull/1056), @weicao) \ No newline at end of file diff --git a/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.4.mdx b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.4.mdx new file mode 100644 index 00000000..fa2189a9 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.4.mdx @@ -0,0 +1,11 @@ +--- +sidebar_label: v0.3.4 +--- + +# KubeBlocks 0.3.4 (2023-02-02) + +## What's Changed + +### New Features +- rename cli cluster flag and adjust default value ([#1181](https://github.com/apecloud/kubeblocks/pull/1181), @ldming) +- support generate random cluster name if name is not specified ([#1175](https://github.com/apecloud/kubeblocks/pull/1175), @ldming) diff --git a/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.5.mdx b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.5.mdx new file mode 100644 index 00000000..ccf93155 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.5.mdx @@ -0,0 +1,12 @@ +--- +sidebar_label: v0.3.5 +--- + +# KubeBlocks 0.3.5 (2023-02-03) + +## What's Changed + +### Bug Fixes +- pvc size not synced to cluster ([#1180](https://github.com/apecloud/kubeblocks/pull/1180), @lynnleelhl) +- make upgrade error more friendly ([#1208](https://github.com/apecloud/kubeblocks/pull/1208), @lynnleelhl) +- cluster create command build default volumeClaimTemplates ([#1209](https://github.com/apecloud/kubeblocks/pull/1209), @ldming) diff --git a/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.6.mdx b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.6.mdx new file mode 100644 index 
00000000..e22c4cfe --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.6.mdx @@ -0,0 +1,10 @@ +--- +sidebar_label: v0.3.6 +--- + +# KubeBlocks 0.3.6 (2023-02-04) + +## What's Changed + +### Bug Fixes +- init cluster info not consistent ([#1231](https://github.com/apecloud/kubeblocks/pull/1231), @lynnleelhl) diff --git a/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.7.mdx b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.7.mdx new file mode 100644 index 00000000..7b81bbab --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.7.mdx @@ -0,0 +1,18 @@ +--- +sidebar_label: v0.3.7 +--- + +# KubeBlocks 0.3.7 (2023-02-10) + +## What's Changed + +### New Features +- backuppolicytemplate improvement ([#1186](https://github.com/apecloud/kubeblocks/pull/1186), @dengshaojiang) +- support postgresql ([#1255](https://github.com/apecloud/kubeblocks/pull/1255), @ZhaoDiankui) +- add postgresql grafana dashboards ([#1349](https://github.com/apecloud/kubeblocks/pull/1349), @yimeisun) +- add postgresql alert rules ([#1350](https://github.com/apecloud/kubeblocks/pull/1350), @yimeisun) +- cli support to connect to postgresql ([#1359](https://github.com/apecloud/kubeblocks/pull/1359), @ldming) + +### Bug Fixes +- cli version output correctly when kubernetes cluster does not work ([#1292](https://github.com/apecloud/kubeblocks/pull/1292), @ldming) +- build kbcli with CGO_ENABLED=0 ([#1339](https://github.com/apecloud/kubeblocks/pull/1339), @ldming) diff --git a/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.8.mdx b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.8.mdx new file mode 100644 index 00000000..8e931256 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.3.0/v0.3.8.mdx @@ -0,0 +1,13 @@ +--- +sidebar_label: v0.3.8 +--- +# KubeBlocks 0.3.8 (2023-02-12) + +## What's Changed + +### New Features +- cli support to set cluster resource ([#1353](https://github.com/apecloud/kubeblocks/pull/1353), @ldming) +- support more metrics for postgresql 
exporter ([#1386](https://github.com/apecloud/kubeblocks/pull/1386), @yimeisun) + +### Bug Fixes +- postgresql poststart script run fail ([#1384](https://github.com/apecloud/kubeblocks/pull/1384), @yimeisun) diff --git a/docs/en/release-1_0_1/release_notes/v0.4.0/_category_.yml b/docs/en/release-1_0_1/release_notes/v0.4.0/_category_.yml new file mode 100644 index 00000000..2f995ac6 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.4.0/_category_.yml @@ -0,0 +1,4 @@ +position: 6 +label: v0.4.0 +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/release_notes/v0.4.0/v0.4.0.mdx b/docs/en/release-1_0_1/release_notes/v0.4.0/v0.4.0.mdx new file mode 100644 index 00000000..edb423a5 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.4.0/v0.4.0.mdx @@ -0,0 +1,80 @@ +# KubeBlocks 0.4.0 (2023-03-10) + +We are happy to announce the release of KubeBlocks 0.4.0 with some exciting new features and improvements. KubeBlocks v0.4.0 now supports PostgreSQL databases and has improved resource isolation, security, and usability. 🚀 🎉 🎈 + +## Highlights + +* KubeBlocks supports PostgreSQL v14.7 [#1127](https://github.com/apecloud/kubeblocks/issues/1127) + + After the support of the world's most popular open source database MySQL, KubeBlocks now supports the world's most advanced open source database PostgreSQL. PostgreSQL offers a wide range of data types and excels at managing spatial locations and geographic information. To provide a seamless, ready-to-use experience for users, PostgreSQL clusters created by KubeBlocks come with popular plugins such as PostGIS pre-installed by default. + Currently, the PostgreSQL clusters created by KubeBlocks do not support cross-AZ high availability, and there is a risk of data loss in extreme scenarios where block devices are damaged. As a result, they are recommended only for testing purposes.
However, in future versions, data reliability, service availability, and feature richness will continue to be enhanced. + +* KubeBlocks simplifies resource isolation settings at the node level. [#1533](https://github.com/apecloud/kubeblocks/issues/1533) + + Stateful services can be impacted by "noisy neighbors," but KubeBlocks has addressed this issue by enabling users to run database clusters on specific Node groups through Taint and Label settings, while still being able to run with "noisy neighbors" when resources are scarce. This improvement gives users greater control over isolation without adding complexity to the interaction, and without requiring them to set affinity. + +* KubeBlocks provides an Endpoint for being accessed outside of the K8s cluster. [#1024](https://github.com/apecloud/kubeblocks/issues/1024), [#1144](https://github.com/apecloud/kubeblocks/issues/1144) + You may need to access KubeBlocks database clusters through VPC or a public Endpoint in scenarios such as PoC or data migration. However, to apply for an Endpoint, users need to learn about the cloud vendor's network ACLs, Load Balancer configuration, and identity and permission management systems. To reduce the learning curve for users, KubeBlocks integrates with the cloud vendor's Load Balancers, enabling smooth network access with just one expose command executed on the target database cluster. + +## Acknowledgements + +Thanks to everyone who made this release possible! + +@iziang, @ldming, @lynnleelhl, @nashtsai, @wangyelei, @weicao, @xuriwuyun, @Y-Rookie, @heng4fun + +## What's Changed + +### New Features + +#### PostgreSQL + +* Lifecycle management in Standalone mode includes creation, deletion, restart, vertical scaling, and horizontal scaling. [#1324](https://github.com/apecloud/kubeblocks/issues/1324), [#1322](https://github.com/apecloud/kubeblocks/issues/1322) +* Full backups based on snapshots, which can be initiated manually or automatically. 
These snapshot backups can be completed within two minutes without affecting the performance of the instance. [#901](https://github.com/apecloud/kubeblocks/issues/901) +* Full recovery based on snapshots. With sufficient resources, users can quickly restore the full amount of data to the new database cluster. + +#### Compatibility +- Pass the AWS EKS v1.22 / v1.23 / v1.24 / v1.25 compatibility test. + +#### Maintainability + +- Users are able to modify the parameter configuration of the database cluster. When modifying parameters, kbcli validates the parameter settings. They can also compare differences between the current and previous versions through historical records. [#849](https://github.com/apecloud/kubeblocks/issues/849) + - Parameters of MySQL are more than 500, including max_connections, max_allowed_packet, table_open_cache and so on. + - Parameters of PostgreSQL are more than 350, including max_connections, shared_buffers, wal_level and so on. [#1323](https://github.com/apecloud/kubeblocks/issues/1323) +- Supports manual start or stop of database clusters by users to save computing resources, and data or backups will not be deleted during the stopped period. + +#### Ease of Use + +- Using kbcli to install KubeBlocks, kbcli will choose the suitable addons to install according to the running environment. [#928](https://github.com/apecloud/kubeblocks/issues/928), [#1354](https://github.com/apecloud/kubeblocks/issues/1354) +- Using kbcli to install KubeBlocks, the namespace is by default kb-system. You can specify the namespace. Note: The database cluster is created in the default namespace or the specified namespace. [#1371](https://github.com/apecloud/kubeblocks/issues/1371) +- Using kbcli to create database clusters, if the cluster name is not specified, the system generates a cluster name.
[#792](https://github.com/apecloud/kubeblocks/issues/792) +- Using kbcli to create database clusters, you can use `--set` to configure resources of the cluster, including CPU, memory, storage size, and replica numbers. [#1337](https://github.com/apecloud/kubeblocks/issues/1337) +- kbcli playground supports the one-click launch of AWS EKS runtime environment to experience the full set of KubeBlocks functions. [#1142](https://github.com/apecloud/kubeblocks/issues/1142) + +#### Resource Isolation + +- KubeBlocks has built-in affinity and toleration configurations on both data plane and control plane to prevent stateless applications from competing for KubeBlocks' runtime resources. [#1533](https://github.com/apecloud/kubeblocks/issues/1533) +- KubeBlocks' database cluster introduces tenant types, where dedicated tenancy can avoid the behavior of database clusters competing for runtime resources with each other. [#931](https://github.com/apecloud/kubeblocks/issues/931) + +#### Observability + +- Support PostgreSQL monitoring, including [#902](https://github.com/apecloud/kubeblocks/issues/902) + - Check Connections, Tuples, Queries, Transactions & WAL, Conflicts & Locks, Buffers & Blocks operations, Temp files, Database Size and the CPU, memory, traffic of pod. + - Built-in common alarm rules, including abnormal events such as restart, slow query, increasing in the number of connections, decreasing in Cache hit rate, deadlock, and AutoVacuum etc. +- Support customized robot alarms for Slack, Feishu, WeChat, and DingTalk.
[#1536](https://github.com/apecloud/kubeblocks/issues/1536) + + +### Bug Fixes + +- ginkgo v2 error fix ([#1011](https://github.com/apecloud/kubeblocks/pull/1011), @lynnleelhl) +- fixed deploy/postgresqlcluster/templates/cluster.yaml template error ([#1043](https://github.com/apecloud/kubeblocks/pull/1043), @nashtsai) +- fixed PostgreSQL chart conf volume error ([#1045](https://github.com/apecloud/kubeblocks/pull/1045), @nashtsai) +- terminate the cli installation if error occurs ([#1147](https://github.com/apecloud/kubeblocks/pull/1147), @ldming) +- build kbcli with CGO_ENABLED=0 ([#1339](https://github.com/apecloud/kubeblocks/pull/1339), @ldming) +- container run as root and rename cd.name ([#1461](https://github.com/apecloud/kubeblocks/pull/1461), @ZhaoDiankui) +- check sts and pods revision ([#1470](https://github.com/apecloud/kubeblocks/pull/1470), @heng4fun) +- install nyancat failed when KubeBlocks repo does not exist ([#1474](https://github.com/apecloud/kubeblocks/pull/1474), @heng4fun) + +## Breaking changes + +- Breaking changes between v0.4 and v0.3. Uninstall v0.3 before installing v0.4. + - Resource group name changes from dbaas.kubeblocks.io to apps.kubeblocks.io. \ No newline at end of file diff --git a/docs/en/release-1_0_1/release_notes/v0.5.0/_category_.yml b/docs/en/release-1_0_1/release_notes/v0.5.0/_category_.yml new file mode 100644 index 00000000..07acdd33 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.5.0/_category_.yml @@ -0,0 +1,4 @@ +position: 5 +label: v0.5.0 +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/release_notes/v0.5.0/v0.5.0.mdx b/docs/en/release-1_0_1/release_notes/v0.5.0/v0.5.0.mdx new file mode 100644 index 00000000..af00bb71 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.5.0/v0.5.0.mdx @@ -0,0 +1,271 @@ +# KubeBlocks 0.5.0 (2023-05-18) + +We're happy to announce the release of KubeBlocks 0.5.0! 
🚀 🎉 🎈 + +This release introduces Redis, a key-value database, and MongoDB, a document-based database. It also supports the primary-secondary topology of PostgreSQL, adapts to more public cloud vendors' hosted Kubernetes versions, improves data backup and recovery experiences, and builds basic data migration capability. We noticed that some users may think that K8s reduces database performance. So in this release we include a comparison test result to explain the throughput and RT differences of various MySQL 8.0 deployment forms on AWS. + +We would like to extend our appreciation to all contributors who helped make this release happen. + +## **Highlights** + +- KubeBlocks supports the primary-secondary topology of PostgreSQL + Users can actively switch the primary-secondary role of the database cluster with kbcli, or passively trigger failover by deleting a specified Kubernetes pod with kubectl. Failover generally completes within 30 seconds when there are no long transactions and large table DDLs. +- KubeBlocks supports Redis v7.0 + Redis is currently the most popular open-source key-value database, supporting data types such as key-value, string, list, set, hash table, and ordered set. It provides extremely fast data read and write operations and is suitable for cache scenarios in e-commerce, social communication, game, and other internet applications. To provide stable, secure, and efficient Redis services to users, KubeBlocks has adopted Redis 7.0 version, which is currently recommended officially, supporting standalone and primary-secondary topologies. Thus, users can perform operations such as creating, deleting, scaling, backing up, restoring, monitoring, alerting, and changing parameters of Redis clusters in development, testing, and production environments. +- KubeBlocks supports MongoDB v5.0 + MongoDB is currently the most popular document-based database, using JSON data types and dynamic schema designs to maintain high flexibility and scalability. 
KubeBlocks supports the replica set topology of MongoDB v5.0, providing data redundancy and automatic failover capabilities, ensuring data availability and consistency in the event of a node failure. The replica set topology cluster has one primary node (Primary) and several secondary nodes (Secondary), with the primary node handling all write requests and the secondary nodes handling some read requests. If the primary node fails, one of the secondary nodes is elected as the new primary node. +- KubeBlocks supports the private deployment of ChatGPT retrieval plugin + For users who do not want to expose sensitive information (such as company documents, meeting minutes, emails), OpenAI has open-sourced the ChatGPT retrieval plugin to enhance the ChatGPT experience. As long as users meet OpenAI's requirements, they can run the ChatGPT retrieval plugin through KubeBlocks addon, store the vectorized data of sensitive information in a private database, and enable ChatGPT to have longer memory of the context while ensuring information security. +- KubeBlocks supports one-command launching of playgrounds on Alibaba Cloud, Tencent Cloud, and GCP + Public cloud vendors' hosted Kubernetes services have significant differences in version, functionality, and integration, so even though deploying stateful services is not difficult, Kubernetes administrators have to do a lot of extra heavy lifting to run stateful services normally. After supporting AWS, KubeBlocks provides the ability to launch playgrounds with one command on Alibaba Cloud, Tencent Cloud, and GCP. Users only need to set up the public cloud access key (AK) locally, and then execute the kbcli playground init command, and KubeBlocks will automatically apply for resources and configure permissions in the specified region, making it easy for users to experience complete functionality. After trying KubeBlocks out, you can clean up the playground environment with one command to avoid incurring costs.
+ +## **Breaking changes** + +- Breaking changes between v0.5 and v0.4. Uninstall v0.4 (including any older version) before installing v0.5. + - Move the backupPolicyTemplate API from dataprotection group to apps group. + Before installing v0.5, please ensure that the resources have been cleaned up: + ``` + kubectl delete backuppolicytemplates.dataprotection.kubeblocks.io --all + kubectl delete backuppolicies.dataprotection.kubeblocks.io --all + ``` + - Redefines the phase of cluster and component. + Before installing v0.5, please ensure that the resources have been cleaned up: + ``` + kubectl delete clusters.apps.kubeblocks.io --all + kubectl delete opsrequests.apps.kubeblocks.io --all + ``` +- `addons.extensions.kubeblocks.io` API deleted `spec.helm.valuesMapping.jsonMap.additionalProperties`, `spec.helm.valuesMapping.valueMap.additionalProperties`, `spec.helm.valuesMapping.extras.jsonMap.additionalProperties` and `spec.helm.valuesMapping.extras.valueMap.additionalProperties` attributes that were introduced by the CRD generator; no existing Addon API YAML should have referenced these attributes. + + +## **Known issues and limitations** +* Limitations of cluster's horizontal scale operation: + * Only support VolumeSnapshot API to make a clone of Cluster's PV for syncing data when horizontal scaling. + * Only 1st pod container and 1st volume mount associated PV will be processed for VolumeSnapshot, so make sure that the data volume is placed in 1st pod container's 1st volume mount. + * Unused PVCs will be deleted in 30 minutes after scale in. + +If you're new to KubeBlocks, visit the [getting started](https://github.com/apecloud/kubeblocks/blob/v0.5.0/docs/user_docs/quick_start_guide.md) page and get a quick start with KubeBlocks. + +> **Note: This release contains a few [breaking changes](#breaking-changes).** + +See [this](#upgrading-to-kubeblocks-0.5.0) section to upgrade KubeBlocks to version 0.5.0.
+ +## Acknowledgements + +Thanks to everyone who made this release possible! + +@1aal, @free6om, @heng4fun, @iziang, @kizuna-lek, @ldming, @nashtsai, @nayutah, @sophon-zt, @TalktoCrystal, @xuriwuyun, @Y-Rookie, @yangmeilly, @ZhaoDiankui + +## What's Changed +### New Features +#### MySQL +- Support XEngine storage engine +- Account management supports creating, modifying, and deleting database accounts with different permissions + +#### PostgreSQL +- Support migration from AWS RDS to KubeBlocks, supporting pre-checks, full migration, and incremental synchronization, verifying the data migration capabilities of CadenceWorkflow and OpenStreetMap +- Support for pgvector extension +- Support for the primary-secondary topology of PostgreSQL +- Automated failover and self-healing +- Support point-in-time recovery +- Account management supports creating, modifying, and deleting database accounts with different permissions + +#### Redis +- Full lifecycle management, including creation, deletion, restart, horizontal/vertical scaling +- Support Redis primary-secondary topology +- Automated failover and self-healing +- Support snapshot backup and recovery +- Metric monitoring, including cluster's basic operation status, connection, OS resources, performance, primary-secondary replication status and other metrics +- Alerts including cluster downtime, OS resource, abnormal connection number, primary-secondary replication abnormality, etc.
+- Parameter reconfigure +- Account management + +#### MongoDB +- Full lifecycle management, including creation, deletion, restart, vertical scaling, and disk expansion +- Endpoint exposes the access addresses of all nodes +- File-based full backup and recovery +- Automated failover and self-healing +- Monitoring, alerting and logs +- Parameter reconfigure + +### Improvements +- add support of Redis cluster ([#1886](https://github.com/apecloud/kubeblocks/pull/1886), @heng4fun) +- add support of ApeCloud MySQL-Scale (Vitess) cluster ([#2116](https://github.com/apecloud/kubeblocks/pull/2116), @ZhaoDiankui) +- add support of MongoDB cluster ([#2182](https://github.com/apecloud/kubeblocks/pull/2182), [#2682](https://github.com/apecloud/kubeblocks/pull/2682), @xuriwuyun) +- kbcli playground add supports of more cloud providers ([#2241](https://github.com/apecloud/kubeblocks/pull/2241), @ldming) +- add support of standalone Milvus DB (VectorDB) ([#2310](https://github.com/apecloud/kubeblocks/pull/2310), @nayutah) +- PostgreSQL add support of pgvector extension ([#2406](https://github.com/apecloud/kubeblocks/pull/2406), @ldming) +- complete cleanup of KubeBlocks for playground destroy ([#2457](https://github.com/apecloud/kubeblocks/pull/2457), @ldming) +- Addon resources add failed pod logs to status condition errors and events ([#3058](https://github.com/apecloud/kubeblocks/pull/3058), @nashtsai) + +### Bug Fixes +- cli playground use default kubeconfig file ([#2150](https://github.com/apecloud/kubeblocks/pull/2150), @ldming) +- update running check ([#2174](https://github.com/apecloud/kubeblocks/pull/2174), @xuriwuyun) +- set cluster default storage size to 20Gi ([#2254](https://github.com/apecloud/kubeblocks/pull/2254), @ldming) +- cli kubeblocks upgrade command output dashboard info ([#2290](https://github.com/apecloud/kubeblocks/pull/2290), @ldming) +- set default storage size to 10Gi for TKE ([#2317](https://github.com/apecloud/kubeblocks/pull/2317), @ldming) +- cli 
playground pull latest cloud provider repo ([#2373](https://github.com/apecloud/kubeblocks/pull/2373), @ldming) +- cli playground does not output error message when kubernetes cluster is not ready ([#2391](https://github.com/apecloud/kubeblocks/pull/2391), @ldming) +- github action uploads kbcli asset for windows and add powershell script to install on windows ([#2449](https://github.com/apecloud/kubeblocks/pull/2449), @1aal) +- trim single quotes for the parameters value in the pg config file (#2523) ([#2527](https://github.com/apecloud/kubeblocks/pull/2527), @sophon-zt) +- config change does not take effect (#2511) ([#2543](https://github.com/apecloud/kubeblocks/pull/2543), @sophon-zt) +- KB_MYSQL_FOLLOWERS env inconsistent with cluster status after scale-in ([#2565](https://github.com/apecloud/kubeblocks/pull/2565), @free6om) +- BackupPolicyTemplate name of mysql-scale error ([#2583](https://github.com/apecloud/kubeblocks/pull/2583), @ZhaoDiankui) +- probe pg checkrole ([#2638](https://github.com/apecloud/kubeblocks/pull/2638), @xuriwuyun) +- adjust vtgate healthcheck options ([#2650](https://github.com/apecloud/kubeblocks/pull/2650), @ZhaoDiankui) +- add secure sql type ([#2656](https://github.com/apecloud/kubeblocks/pull/2656), @kizuna-lek) +- take cluster cmd 'kbcli cluster update --enabled-all-logs' effect ([#2663](https://github.com/apecloud/kubeblocks/pull/2663), @yangmeilly) +- h-scale pvc unexpected deleting ([#2680](https://github.com/apecloud/kubeblocks/pull/2680), @free6om) +- support mongodb backup ([#2683](https://github.com/apecloud/kubeblocks/pull/2683), @xuriwuyun) +- replicationSet cluster stop failed fix ([#2691](https://github.com/apecloud/kubeblocks/pull/2691), @Y-Rookie) +- h-scale pvc unexpected deleting (#2680) ([#2730](https://github.com/apecloud/kubeblocks/pull/2730), @free6om) +- mongodb support addon ([#2758](https://github.com/apecloud/kubeblocks/pull/2758), @xuriwuyun) +- mongodb support addon (#2758) 
([#2764](https://github.com/apecloud/kubeblocks/pull/2764), @xuriwuyun) +- h-scale volumesnapshot unexpected deleted ([#2789](https://github.com/apecloud/kubeblocks/pull/2789), @free6om) +- set driver for bench config ([#2804](https://github.com/apecloud/kubeblocks/pull/2804), @ldming) +- h-scale pvc pending with WaitForFirstConsumer ([#2836](https://github.com/apecloud/kubeblocks/pull/2836), @free6om) +- cli list-instances throw error when node is not found ([#2936](https://github.com/apecloud/kubeblocks/pull/2936), @ldming) +- wrong viper setting in ut ([#2937](https://github.com/apecloud/kubeblocks/pull/2937), @free6om) +- mysql cluster execute ops VolumeExpansion Failed ([#2950](https://github.com/apecloud/kubeblocks/pull/2950), @free6om) +- cluster always updating after termination-policy updated ([#2956](https://github.com/apecloud/kubeblocks/pull/2956), @free6om) +- h-scale too many backup error warning events ([#2964](https://github.com/apecloud/kubeblocks/pull/2964), @free6om) +- some command examples can not be executed ([#3034](https://github.com/apecloud/kubeblocks/pull/3034), @ldming) +- adjust dependency resource deletion order when cluster deletion ([#3065](https://github.com/apecloud/kubeblocks/pull/3065), @Y-Rookie) +- mongodb restart alert title is incorrect (#3171) ([#3173](https://github.com/apecloud/kubeblocks/pull/3173), @yangmeilly) +- observed generation wrong patched ([#3184](https://github.com/apecloud/kubeblocks/pull/3184), @free6om) +- cli playground failed ([#3240](https://github.com/apecloud/kubeblocks/pull/3240), @ldming) + +### Miscellaneous +- lifecycle dag ([#1571](https://github.com/apecloud/kubeblocks/pull/1571), @free6om) +- add cluster default webhook for `PrimaryIndex` ([#1677](https://github.com/apecloud/kubeblocks/pull/1677), @heng4fun) +- refactor labels usage ([#1696](https://github.com/apecloud/kubeblocks/pull/1696), @heng4fun) +- update probe mysql tests ([#1808](https://github.com/apecloud/kubeblocks/pull/1808), 
@xuriwuyun) +- update pg probe url ([#2115](https://github.com/apecloud/kubeblocks/pull/2115), @xuriwuyun) +- cli support to output addon install progress ([#2132](https://github.com/apecloud/kubeblocks/pull/2132), @ldming) +- rewrite overview ([#2266](https://github.com/apecloud/kubeblocks/pull/2266), @TalktoCrystal) +- move loadbalancer sub-module to a separate repo https ([#2354](https://github.com/apecloud/kubeblocks/pull/2354), @iziang) +- use gitlab helm repo if failed to get ip location ([#2421](https://github.com/apecloud/kubeblocks/pull/2421), @ldming) +- update redis role probe ([#2554](https://github.com/apecloud/kubeblocks/pull/2554), @xuriwuyun) +- update mongodb helm ([#2575](https://github.com/apecloud/kubeblocks/pull/2575), @xuriwuyun) +- kbcli support mongodb ([#2580](https://github.com/apecloud/kubeblocks/pull/2580), @xuriwuyun) +- support xengine for apecloud-mysql ([#2608](https://github.com/apecloud/kubeblocks/pull/2608), @sophon-zt) +- support postgresql 14.7 instead of 15.2 ([#2613](https://github.com/apecloud/kubeblocks/pull/2613), @ldming) +- improve cluster create examples ([#2641](https://github.com/apecloud/kubeblocks/pull/2641), @ldming) +- ut for nil backup policy ([#2654](https://github.com/apecloud/kubeblocks/pull/2654), @free6om) +- sqlchannel add test ([#2694](https://github.com/apecloud/kubeblocks/pull/2694), @xuriwuyun) +- configure does not take effect ([#2735](https://github.com/apecloud/kubeblocks/pull/2735), @sophon-zt) +- remove default postgresql metrics service and optimize redis sentinel configuration ([#2741](https://github.com/apecloud/kubeblocks/pull/2741), @Y-Rookie) +- cli set default cluster value based on workload and component type ([#2743](https://github.com/apecloud/kubeblocks/pull/2743), @ldming) +- disable hook when uninstall KubeBlocks ([#2795](https://github.com/apecloud/kubeblocks/pull/2795), @ldming) +- refine kbcli for mongodb ([#2834](https://github.com/apecloud/kubeblocks/pull/2834), @xuriwuyun) +- 
refine kbcli for mongodb (#2834) ([#2841](https://github.com/apecloud/kubeblocks/pull/2841), @xuriwuyun) +- improve cli cluster describe ([#2860](https://github.com/apecloud/kubeblocks/pull/2860), @ldming) +- refine kbcli for mongodb ([#2927](https://github.com/apecloud/kubeblocks/pull/2927), @xuriwuyun) +- cli supports to create additional resource for postgresql ([#3009](https://github.com/apecloud/kubeblocks/pull/3009), @ldming) +- add kbcli connect example support for mongodb ([#3023](https://github.com/apecloud/kubeblocks/pull/3023), @xuriwuyun) +- improve kubeblocks install and uninstall command ([#3053](https://github.com/apecloud/kubeblocks/pull/3053), @ldming) +- local playground with one replica ([#3255](https://github.com/apecloud/kubeblocks/pull/3255), @ldming) +- improve ApeCloud mysql description ([#3292](https://github.com/apecloud/kubeblocks/pull/3292), @ldming) + +### Ease of Use +- `kbcli playground` supports one-command launching on running environments of Alibaba Cloud, Tencent Cloud, and GCP to experience complete KubeBlocks functionality +- kbcli supports creating clusters by entering CPU, memory, or class type +- kbcli supports tagging related resources of cluster +- kbcli is compatible with macOS package manager `brew` +- kbcli supports `preflight` command to check whether the environment meets the requirements for installing KubeBlocks +- kbcli adds object storage addon for storing full file backups, logs, and other data +- `kbcli install` runs preflight to check whether the environment meets the requirements, including node taints, storage class, and other check rules +- kbcli addon adds timeout parameter, printing exception information when enable fails +- Addon inherits the affinity and tolerations configuration of KubeBlocks +- `kbcli uninstall` prompts information to delete backup files, printing log information if the deletion fails +- ClusterDefinition API `spec.connectionCredential` add following built-in variables: + - Headless 
service FQDN `$(HEADLESS_SVC_FQDN)` placeholder, value pattern - `$(CLUSTER_NAME)-$(1ST_COMP_NAME)-headless.$(NAMESPACE).svc`, where 1ST_COMP_NAME is the 1st component that provide `ClusterDefinition.spec.componentDefs[].service` attribute + +#### Compatibility +- Compatible with AWS EKS v1.22/v1.23/v1.24/v1.25 +- Compatible with Alibaba Cloud ACK v1.22/v1.24 +- Compatible with Tencent Cloud TKE standard cluster v1.22/v1.24 +- Compatible with GCP GKE standard cluster v1.24/v1.25 + +#### Stability +- KubeBlocks limits the combination of CPU and memory to avoid unreasonable configurations that reduce resource utilization or system stability + +#### Performance +- High-availability MySQL 8.0 with 4C 8GB 500GB, throughput and RT differences of various products on AWS, including ApeCloud MySQL Raft group, AWS RDS operator, Operator for Percona Server for MySQL, Oracle MySQL Operator for Kubernetes + +### API changes +- New APIs: + - backuppolicytemplates.apps.kubeblocks.io + - componentclassdefinitions.apps.kubeblocks.io + - componentresourceconstraints.apps.kubeblocks.io + +- Deleted APIs: + - backuppolicytemplates.dataprotection.kubeblocks.io + +- New API attributes: + - clusterdefinitions.apps.kubeblocks.io API + - spec.type + - spec.componentDefs.customLabelSpecs + - clusterversions.apps.kubeblocks.io API + - spec.componentVersions.clientImage (EXPERIMENTAL) + - clusters.apps.kubeblocks.io API + - spec.componentSpecs.classDefRef + - spec.componentSpecs.serviceAccountName + - configconstraints.apps.kubeblocks.io API + - spec.reloadOptions.shellTrigger.namespace + - spec.reloadOptions.shellTrigger.scriptConfigMapRef + - spec.reloadOptions.tplScriptTrigger.sync + - spec.selector + - opsrequests.apps.kubeblocks.io API + - spec.restoreFrom + - spec.verticalScaling.class + - status.reconfiguringStatus.configurationStatus.updatePolicy + - backuppolicies.dataprotection.kubeblocks.io API + - spec.full + - spec.logfile + - spec.retention + - 
backups.dataprotection.kubeblocks.io + - status.manifests + - backuptools.dataprotection.kubeblocks.io + - spec.type + +- Renamed API attributes: + - clusterdefinitions.apps.kubeblocks.io API + - spec.componentDefs.horizontalScalePolicy.backupTemplateSelector -> spec.componentDefs.horizontalScalePolicy.backupPolicyTemplateName + - spec.componentDefs.probe.roleChangedProbe -> spec.componentDefs.probe.roleProbe + - backuppolicies.dataprotection.kubeblocks.io API + - spec.full + - restorejobs.dataprotection.kubeblocks.io API + - spec.target.secret.passwordKeyword -> spec.target.secret.passwordKey + - spec.target.secret.userKeyword -> spec.target.secret.usernameKey + - addons.extensions.kubeblocks.io API + - spec.helm.installValues.secretsRefs -> spec.helm.installValues.secretRefs + +- Deleted API attributes: + - opsrequests.apps.kubeblocks.io API + - status.observedGeneration + - backuppolicies.dataprotection.kubeblocks.io API + - spec.backupPolicyTemplateName + - spec.backupToolName + - spec.backupType + - spec.backupsHistoryLimit + - spec.hooks + - spec.incremental + - backups.dataprotection.kubeblocks.io API + - spec.ttl + - status.CheckPoint + - status.checkSum + - addons.extensions.kubeblocks.io API + - spec.helm.valuesMapping.jsonMap.additionalProperties + - spec.helm.valuesMapping.valueMap.additionalProperties + - spec.helm.valuesMapping.extras.jsonMap.additionalProperties + - spec.helm.valuesMapping.extras.valueMap.additionalProperties + +- Updates API Status info: + - clusters.apps.kubeblocks.io API + - status.components.phase valid values are Running, Stopped, Failed, Abnormal, Creating, Updating; REMOVED phases are SpecUpdating, Deleting, Deleted, VolumeExpanding, Reconfiguring, HorizontalScaling, VerticalScaling, VersionUpgrading, Rebooting, Stopping, Starting. 
+ - status.phase valid values are Running, Stopped, Failed, Abnormal, Creating, Updating; REMOVED phases are ConditionsError, SpecUpdating, Deleting, Deleted, VolumeExpanding, Reconfiguring, HorizontalScaling, VerticalScaling, VersionUpgrading, Rebooting, Stopping, Starting. + - opsrequests.apps.kubeblocks.io API + - status.components.phase valid values are Running, Stopped, Failed, Abnormal, Creating, Updating; REMOVED phases are SpecUpdating, Deleting, Deleted, VolumeExpanding, Reconfiguring, HorizontalScaling, VerticalScaling, VersionUpgrading, Rebooting, Stopping, Starting, Exposing. + - status.phase added 'Creating' phase. + +## Upgrading to KubeBlocks 0.5.0 +- N/A if upgrading from 0.4 or older version. diff --git a/docs/en/release-1_0_1/release_notes/v0.6.0/_category_.yml b/docs/en/release-1_0_1/release_notes/v0.6.0/_category_.yml new file mode 100644 index 00000000..aacd760c --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.6.0/_category_.yml @@ -0,0 +1,4 @@ +position: 4 +label: v0.6.0 +collapsible: true +collapsed: true diff --git a/docs/en/release-1_0_1/release_notes/v0.6.0/v0.6.0.mdx b/docs/en/release-1_0_1/release_notes/v0.6.0/v0.6.0.mdx new file mode 100644 index 00000000..fd1ab407 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.6.0/v0.6.0.mdx @@ -0,0 +1,269 @@ +# KubeBlocks 0.6.0 (2023-08-18) + +We are happy to announce the official release of KubeBlocks 0.6.0! 🚀 🎉 🎈 + +This version supports stream computing engines Kafka and Pulsar, supports MySQL read-write splitting, and introduces an interactive parameter configuration method. + +We would like to extend our appreciation to all contributors who helped make this release happen. + +## Highlights + +- KubeBlocks supports Kafka v3.3 + - Apache Kafka is an open-source distributed event storage and streaming computing system that provides high reliability, throughput, and low latency for data pipelines, streaming analysis, and data integration. 
It is widely used in log collection and metric monitoring scenarios. KubeBlocks supports Kafka v3.3, which announced that KRaft meets production requirements, providing better partition scalability and resilience, and saving the additional costs of ZooKeeper. In addition, KubeBlocks also supports pushing data changes from MySQL and PostgreSQL to Kafka, making it easier for users to further process and handle the data. +- KubeBlocks supports Pulsar v2.11 + - Apache Pulsar is an open-source distributed messaging and stream processing platform. It aims to provide scalability, high performance, and reliability to meet the needs of modern data processing and real-time messaging. KubeBlocks supports Apache Pulsar v2.11, and compared to traditional deployment methods, KubeBlocks can automate fault tolerance, scaling, and other day2 operations. +- KubeBlocks supports MySQL read-write splitting + - Read-write splitting is designed to improve the read-only processing capability of a MySQL database cluster. All write queries are sent to the master node, while read-only queries that do not modify data are distributed to multiple slave nodes. Read-write splitting is used together with the MySQL Raft Group cluster, which automatically detects changes in the master node and uses the current master node of the cluster to achieve failover. Set `read_write_splitting_policy` as read-write splitting at the global or session level, and the default policy is `LEAST_CURRENT_OPERATIONS`, which routes read-only queries to the slave node with the least active operations. The MySQL Raft Group cluster supports up to 5 nodes. + +## Breaking changes + +- For KubeBlocks v0.6, newly created Redis and PG clusters need password to connect, while v0.5 used to allow password-free login +- For KubeBlocks v0.6, the backup and restore function is updated greatly. You need to manually upgrade the 0.5 version cluster to 0.6 and update backupPolicy to use the new function. 
+- For KubeBlocks v0.6, since password-free login function is not supported for Postgres cluster, when restoring cluster upgraded from v0.5, if the new restored cluster is in creating status for a long time, you can check the pod logs to see whether there is `password authentication failed` error, you can update password to fix this. + + +## Acknowledgements + +Thanks to everyone who made this release possible! + +## What's New + +### Pulsar + +- Cluster lifecycle management and operation management, supporting the creation of Pulsar clusters, cluster deletion, cluster restarting, horizontal scaling, vertical scaling, volume expanding, and parameter modifying. +- For monitoring, support performance monitoring of CPU, memory, network read/write traffic, and more for ZooKeeper, BookKeeper, and Broker. + +### Kafka + +- Cluster lifecycle management and operation management, supporting cluster creation, deletion, horizontal scaling, vertical scaling, volume expanding, and modifying parameters. + - Horizontal scaling: In combined mode, broker replicas support 1, 3, or 5 copies. In separated mode, brokers support 1 to 100 copies and Kafka controllers support 1, 3, or 5 copies. +- For monitoring, supports performance monitoring of Broker's CPU, memory, network read/write traffic, etc. + +### MySQL + +- MySQL RaftGroup Cluster supports Switchover +- MySQL RaftGroup Cluster supports MySQL read-write splitting. +- Data Recovery, Non-Destructive Recovery by Point-in-time(PITR). It's a beta feature. +- Support proxy enable when creating cluster. (Beta) + - The default specification of Vitess proxy can meet the vast majority of user scenarios. Vitess proxy automatically triggers resource increase or decrease based on changes in database nodes and node specifications, without manual operation. The CPU of Vitess proxy is 1/6 of the total number of CPU cores in the cluster nodes (three nodes), rounded up to 0.5c granularity, with a minimum of 0.5c and a maximum of 64c. 
The default number of replicas is 1, and currently, modifying the number of replicas is not supported + - Connection Address: The proxy has a default connection address that supports read-write splitting. The expose command can generate VPC addresses and public network addresses for the proxy connection address. + - Supports setting Vitess proxy read-write splitting policies. + +### PostgreSQL + +- Support switchover for PG Replication Cluster +- Built-in pgBouncer + +### MongoDB + +- MongoDB replicaset supports switchover +- Data recovery, non-destructive PITR (Beta) + +### Data migration + +- Add `kbcli migration` command, including creating migration tasks, viewing migration task list, viewing migration task details, terminating migration tasks, viewing logs, viewing migration templates, and other functions. Supports full migration and incremental synchronization. +- Support MySQL data migration from MySQL 8.0 to MySQL 8.0. +- Support PostgreSQL data migration from PostgreSQL 14 to PostgreSQL 14. +- Support MongoDB data migration from MongoDB5.X/6.0 to MongoDB 5.X/6.0. + +### Compatibility + +- Support remote write to Prometheus server through Prometheus v2.41 - 2.45 compatibility testing +- Adapt kbcli to package managers of Ubuntu and Amazon Linux 2 +- Adapt kbcli to Windows PowerShell and package managers +- Support local environment running on Ubuntu and Amazon Linux 2 for kbcli playground +- Support local environment running on Windows for kbcli playground + +### Usability + +- kbcli supports users to modify KubeBlocks parameters with local editing tools on the operating system +- kbcli supports the "fault-inject" extension for fault injection +- kbcli supports the "report" command to package cluster context information into a compressed file for problem troubleshooting assistance +- kbcli supports interactive editing of configuration information for DB clusters. 
For cluster create, support --edit to interactively edit the yaml, and show the cluster summary before create. +- Support canceling Hscale/Vscale OpsRequest that are running (Beta) +- Add kbcli playground Grafana overview page +- Kbcli alert can set email server +- Support initializing the creation of databases and users +- Specify a configuration file for initialization settings when installing KubeBlocks +- Disk full lock for MySQL, PostgreSQL, MongoDB: When the database storage space is about to be full (disk usage is more than 90%), set the database to a read-only state. + +### Backup and Restore + +- The backup storage repository (backupRepo) refers to the target path where backup files are stored. KubeBlocks supports public cloud object storage and S3-compatible object storage. +- Support modifying cluster configuration when restoring a new cluster +- Add "describe-backup" command to view backup details + +### Observability + +- Support integration with external monitoring systems include Prometheus, VictoriaMetrics, and AMP. Cluster performance monitoring metrics are output to the target monitoring system through remote write. +- Real-time dumping of operation logs for K8s/KubeBlocks clusters to object storage, divided by time and size, and providing read-only addresses +- Real-time dumping of error logs for K8s/KubeBlocks clusters to object storage, divided by time and size, and providing read-only addresses + +### Bug Fixes + +- There is a quota problem, no default resource limits are set for containers such as metrics. 
([#2738](https://github.com/apecloud/kubeblocks/pull/2738), @nashtsai) +- cli ops command output lost ops name ([#2904](https://github.com/apecloud/kubeblocks/pull/2904), @ldming) +- probe event lost ([#3172](https://github.com/apecloud/kubeblocks/pull/3172), @xuriwuyun) +- update probe internal module ([#3414](https://github.com/apecloud/kubeblocks/pull/3414), @xuriwuyun) +- cli playground init cluster without sa, role and rolebinding ([#3428](https://github.com/apecloud/kubeblocks/pull/3428), @ldming) +- cli created cluster with wrong sa name ([#3500](https://github.com/apecloud/kubeblocks/pull/3500), @ldming) +- The cluster status and pod status are inconsistent. clusters can terminate faster than pods. ([#3558](https://github.com/apecloud/kubeblocks/pull/3558), @lynnleelhl) +- enable consensus_set controller by feature gate ([#3616](https://github.com/apecloud/kubeblocks/pull/3616), @free6om) +- licensing description ([#3664](https://github.com/apecloud/kubeblocks/pull/3664), @free6om) +- change preflight checks rule for low spec machine in test case ([#3722](https://github.com/apecloud/kubeblocks/pull/3722), @yangmeilly) +- redis prohibits passwordless access by default ([#3726](https://github.com/apecloud/kubeblocks/pull/3726),@Y-Rookie) +- mongodb post start script ([#3956](https://github.com/apecloud/kubeblocks/pull/3956), @xuriwuyun) +- mongodb pod terminating takes too long time ([#3971](https://github.com/apecloud/kubeblocks/pull/3971), @xuriwuyun) +- mongodb restore problem, reset password ([#3987](https://github.com/apecloud/kubeblocks/pull/3987), @xuriwuyun) +- MongoDB does not receive signals when terminating pods. 
([#3990](https://github.com/apecloud/kubeblocks/pull/3990), @xuriwuyun) +- add more judgements for mongodb replset initialization ([#4034](https://github.com/apecloud/kubeblocks/pull/4034), @xuriwuyun) +- parallel_transformer concurrent map writes ([#4079](https://github.com/apecloud/kubeblocks/pull/4079), @free6om) +- probe mysql binding test fail ([#4082](https://github.com/apecloud/kubeblocks/pull/4082), @xuriwuyun) +- kbcli set componentserviceaccontName ([#4276](https://github.com/apecloud/kubeblocks/pull/4276), @xuriwuyun) +- create a default sa if probes is required and serviceaccountname not set ([#4280](https://github.com/apecloud/kubeblocks/pull/4280), @xuriwuyun) +- The processing logic of WeSQL scaling is not stable enough, and downsizing can easily lead to inconsistent kernel and cluster states. ([#4372](https://github.com/apecloud/kubeblocks/pull/4372), [#4293](https://github.com/apecloud/kubeblocks/pull/4293), [#3202](https://github.com/apecloud/kubeblocks/pull/3202), @leon-inf) +- replicationSet cluster phase is abnormal during h/v-scale operations. ([#4377](https://github.com/apecloud/kubeblocks/pull/4377), @Y-Rookie) +- PVCs are occasionally not deleted after scale-in. 
([#4378](https://github.com/apecloud/kubeblocks/pull/4378), @lynnleelhl) +- kbcli cluster connect show example error 4403 ([#4404](https://github.com/apecloud/kubeblocks/pull/4404), @fengluodb) +- if cluster backup field is nil, do nothing ([#4435](https://github.com/apecloud/kubeblocks/pull/4435), @ldming) +- failed to enable kafka addon ([#4474](https://github.com/apecloud/kubeblocks/pull/4474), @ldming) +- unstable data protection test case ([#4497](https://github.com/apecloud/kubeblocks/pull/4497), @ldming) +- shutdown redis-server when create replication relationship failed and update postgresql start scripts ([#4568](https://github.com/apecloud/kubeblocks/pull/4568), @Y-Rookie) +- sa creation problem for template ([#4626](https://github.com/apecloud/kubeblocks/pull/4626), @xuriwuyun) +- mongodb restore problem ([#4690](https://github.com/apecloud/kubeblocks/pull/4690), @xuriwuyun) + +## API changes + +- New APIs: + - backuprepos.dataprotection.kubeblocks.io + - replicatedstatemachines.workloads.kubeblocks.io + - storageproviders.storage.kubeblocks.io +- New API attributes: + - clusters.apps.kubeblocks.io API + - spec.availabilityPolicy + - spec.backup + - spec.tenancy + - spec.replicas + - spec.resources + - spec.storage + - spec.mode + - spec.parameters + - spec.monitor + - spec.network + - spec.componentSpecs.noCreatePDB + - clusterdefinitions.apps.kubeblocks.io API + - spec.componentDefs.componentDefRef + - spec.componentDefs.configSpecs.asEnvfrom + - spec.componentDefs.configSpecs.lazyRenderedConfigSpec + - spec.componentDefs.statelessSpec + - spec.componentDefs.statefulSpec + - spec.componentDefs.switchoverSpec + - spec.componentDefs.volumeProtectionSpec + - spec.componentDefs.systemAccounts.accounts.provisionPolicy.statements.update + - spec.componentDefs.consensusSpec.LLPodManagementPolicy + - spec.componentDefs.consensusSpec.LLUpdateStrategy + - clusterversions.apps.kubeblocks.io API + - spec.componentVersions.configSpecs.asEnvFrom + - 
spec.componentVersions.configSpecs.lazyRenderedConfigSpec + - spec.componentVersions.systemAccountSpec + - spec.componentVersions.switchoverSpec + - configconstraint.apps.kubeblocks.io API + - spec.toolsImageSpec + - spec.scriptConfigs + - spec.downwardAPIOptions + - opsrequests.apps.kubeblocks.io API + - spec.cancel + - spec.switchover + - spec.ttlSecondsBeforeAbort + - spec.scriptSpec + - status.components.reason + - status.components.message + - spec.verticalScaling.classDefRef + - status.lastConfiguration.components.classDefRef + - backuppolicytemplates.apps.kubeblocks.io API + - spec.backupPolicies.schedule.StartingDeadlineMinutes + - spec.backupPolicies.[snapshot|datafile|logfile].backupStatusUpdates.useTargetPodServiceAccount + - backups.dataprotection.kubeblocks.io API + - status.sourceCluster + - status.availableReplicas + - status.manifests.backupTool.VolumeName + - backuppolicies.dataprotection.kubeblocks.io API + - spec.[logfile|datafile].backupRepoName + - spec.schedule.StartingDeadlineMinutes + - spec.[snapshot|datafile|logfile].backupStatusUpdates.useTargetPodServiceAccount + - backuptools.dataprotection.kubeblocks.io API + - spec.logical.podScope + - addons.extensions.kubeblocks.io API + - spec.cliPlugins + - spec.helm.chartsImage + - spec.helm.chartsPathInImage +- Modified API attributes: + - clusters.apps.kubeblocks.io API + - spec.clusterDefinitionRef add validation MaxLength=63 + - spec.clusterVersionRef add validation MaxLength=63 + - spec.componentSpecs.name update validation MaxLength: `15` -> `22` + - spec.componentSpecs.name update validation Pattern: `^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` -> `^[a-z]([a-z0-9\-]*[a-z0-9])?$` + - spec.componentSpecs.componentDefRef update validation MaxLength: `63` -> `22` + - spec.componentSpecs.componentDefRef update validation Pattern: `^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` -> `^[a-z]([a-z0-9\-]*[a-z0-9])?$` + - spec.componentSpecs.classDefRef.name add validation MaxLength=63 + - 
spec.componentSpecs.classDefRef.name add validation Pattern=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` + - spec.componentSpecs.switchPolicy default value: `MaximumAvailability -> Noop`, enum values`{MaximumAvailability, MaximumDataProtection, Noop} -> {Noop}` + - clusterdefinitions.apps.kubeblocks.io API + - spec.componentDefs.name update validation MaxLength: `18` -> `22` + - spec.componentDefs.name update validation Pattern: `^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` -> `^[a-z]([a-z0-9\-]*[a-z0-9])?$` + - spec.componentDefs.scriptSpecs.namespace add validation Pattern=`^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$` + - spec.componentDefs.scriptSpecs.volumeName update validation MaxLength: `32` -> `63` + - spec.componentDefs.scriptSpecs.volumeName add validation Pattern=`^[a-z]([a-z0-9\-]*[a-z0-9])?$` + - spec.componentDefs.replicationSpec update type: `ReplicationSpec` -> `ReplicationSetSpec` + - spec.componentDefs.horizontalScalePolicy.type: delete Snapshot type and add CloneVolume type + - clusterversions.apps.kubeblocks.io API + - spec.componentVersions.configSpecs.namespace add validation Pattern=`^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$` + - spec.componentVersions.configSpecs.volumeName update validation MaxLength: `32` -> `63` + - spec.componentVersions.configSpecs.volumeName add validation Pattern=`^[a-z]([a-z0-9\-]*[a-z0-9])?$` + - componentclassdefinition.apps.kubeblocks.io API + - status.classes update type `ComponentClassInstance` -> `ComponentClass` + - componentresourceconstraint.apps.kubeblocks.io API + - spec + - configconstraint.apps.kubeblocks.io API + - spec.reloadOptions.unixSignalTrigger.processName delete validation Pattern=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` + - spec.reloadOptions.shellTrigger + - spec.reloadOptions.tplScriptTrigger.namespace add validation Pattern=`^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$` + - spec.formatterConfig.format add type props-plus + - opsrequests.apps.kubeblocks.io API + - spec.clusterRef add validation XValidation:rule="self == oldSelf" + - spec.type 
add type Switchover and DataScript + - spec.type add validation XValidation:rule="self == oldSelf" + - spec.upgrade add validation XValidation:rule="self == oldSelf" + - spec.horizontalScaling add validation XValidation:rule="self == oldSelf" + - spec.restart add validation XValidation:rule="self == oldSelf" + - spec.reconfigure add validation XValidation:rule="self == oldSelf" + - spec.expose add validation XValidation:rule="self == oldSelf" + - spec.restoreFrom add validation XValidation:rule="self == oldSelf" + - status.phase add type Cancelling and Cancelled + - backuppolicytemplates.apps.kubeblocks.io API + - spec.backupPolicies.componentDefRef update validation MaxLength: `63` -> `22` + - spec.backupPolicies.componentDefRef update validation Pattern: `^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` -> `^[a-z]([a-z0-9\-]*[a-z0-9])?$` + - spec.backupPolicies.[snapshot|datafile|logfile].backupStatusUpdates.updateStage update validation: optional -> required + - backups.dataprotection.kubeblocks.io API + - status.phase add type Running and Deleting + - status.manifests.backupTool rename `checkSum` -> `checksum` and `CheckPoint` -> `checkpoint` + - backuppolicies.dataprotection.kubeblocks.io API + - spec.[logfile|datafile].persistentVolumeClaim update validation: `required` -> `optional` + - spec.[logfile|datafile].persistentVolumeClaim.name update validation: `required` -> `optional` + - spec.[logfile|datafile].persistentVolumeClaim.storageClassName add validation Pattern:`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` + - spec.[logfile|datafile].persistentVolumeClaim.persistentVolumeConfigMap.name add validation Pattern: `^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` + - spec.[logfile|datafile].persistentVolumeClaim.persistentVolumeConfigMap.namespace add validation Pattern: `^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$` + - backuptools.dataprotection.kubeblocks.io API + - spec.deployKind update value `{job,daemon}` -> `{job, statefulSet}` +- Deleted API attributes: + - clusters.apps.kubeblocks.io API + - 
spec.componentSpecs.primaryIndex + - clusterdefinitions.apps.kubeblocks.io API + - spec.componentDefs.maxUnavailable + - clusterversions.apps.kubeblocks.io API + - spec.componentVersions.clientImage + - componentclassdefinition.apps.kubeblocks.io API + - spec.groups.resourceConstraintRef + - opsrequests.apps.kubeblocks.io API + - spec.verticalScaling.class + - status.lastConfiguration.components.class diff --git a/docs/en/release-1_0_1/release_notes/v0.7.0/_category_.yml b/docs/en/release-1_0_1/release_notes/v0.7.0/_category_.yml new file mode 100644 index 00000000..65721440 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.7.0/_category_.yml @@ -0,0 +1,4 @@ +position: 3 +label: v0.7.0 +collapsible: true +collapsed: true diff --git a/docs/en/release-1_0_1/release_notes/v0.7.0/v0.7.0.mdx b/docs/en/release-1_0_1/release_notes/v0.7.0/v0.7.0.mdx new file mode 100644 index 00000000..1975ee0f --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.7.0/v0.7.0.mdx @@ -0,0 +1,79 @@ +# KubeBlocks 0.7.0 (2023-11-06) + +We're thrilled to announce the official release of KubeBlocks v0.7.0! 🚀 🎉 🎈 + +This version supports the function of referencing external components, making it possible to assemble building blocks - add-ons more flexibly. + +Backup is decoupled from the cluster with a separate API, and three new object storage services from public cloud providers are added as backup storage options. + +Thanks to the support of our community, KubeBlocks has included 31 open-source database engines, including MariaDB, Elasticsearch, Pulsar, Pika and more, providing more choices for you. + +We would like to extend our appreciation to all contributors who helped make this release happen. + +## Highlights + +### Support referencing external components + +The new feature of referencing external components brings greater flexibility to KubeBlocks clusters, allowing them to be assembled like building blocks to create new clusters. 
Currently, this function can be applied to two scenarios. Firstly, you can reference external components, such as external Zookeeper, that are not created and managed by KubeBlocks. You just need to define the service description and Kubernetes deployment is not required. Another scenario is that clusters created by KubeBlocks can be referenced as components by other clusters. For example, a Redis Sentinel cluster can be referenced by other Redis Replication clusters for managing high-availability. + +### Decouple cluster and backup [#4877](https://github.com/apecloud/kubeblocks/issues/4877), [#4494](https://github.com/apecloud/kubeblocks/issues/4494) + +The backup policy supports customizing backup methods, allowing you to specify a backup method when creating a backup. The BackupTool has been replaced with the ActionSet CRD, which defines actions for backup and restore. Three new object storage services, GCS, OBS, and COS, have been added as backup storage options. Besides, BackupSchedule is added to decouple automatic scheduling configurations from BackupPolicy that can be used for both automatic and manual backup. + +### Support Pika v3.5 + +Pika, developed by Qihoo, is an open-source NoSQL database that is compatible with Redis interfaces such as string, hash, list, zset, and set operations. It offers a cost advantage in 100 GB level and larger data volumes. Switching from Redis to Pika is seamless, as it maintains the same usage and operation habits. Currently, KubeBlocks has already supported deploying the sharded cluster mode from Pika v3.5. After a cluster is created by KubeBlocks, Pika is automatically added to the Codis cluster with rebalancing applied. KubeBlocks can also automatically manage Pika Primary-Secondary clusters, which can automatically add one primary with one or multiple secondaries. 
+ +## What's Changed + +### New features + +#### Pulsar [#4587](https://github.com/apecloud/kubeblocks/issues/4587) + +Supports multiple Pulsar clusters that can share a zookeeper component. + +#### Backup and restore + +Supports three object storage services, namely GCS(Google Cloud Storage), OBS (Huawei Cloud Object Storage), and COS (Tencent Cloud Object Storage), as backup storage options. + +#### Compatibility + +Compatible with Huawei Cloud. [#4697](https://github.com/apecloud/kubeblocks/issues/4697) + +#### Support multiple open-source engines + +The table below provides an overview of the integrated engines and their capabilities in KubeBlocks. + +| V0.7.0 | Vscale | Hscale | Volumeexpand | Stop/Start | Restart | Backup/Restore | Logs | Config | Upgrade (DB engine version) | Account | Failover | Switchover | Monitor | +|---------------------------------------|--------|--------|--------------|------------|---------|----------------|------|--------|-----------------------------|---------|----------|------------|---------| +| apecloud-mysql | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | ✔️ | ✔️ | ✔️ | ✔️ | +| postgresql | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | +| redis | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | ✔️ | ✔️ | N/A | ✔️ | +| mongodb | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | ✔️ | ✔️ | ✔️ | +| kafka | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | ✔️ | N/A | N/A | N/A | N/A | ✔️ | +| pulsar | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | ✔️ | N/A | N/A | N/A | N/A | ✔️ | +| weaviate | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | ✔️ | N/A | N/A | N/A | N/A | ✔️ | +| qdrant | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | ✔️ | +| greptimedb | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| nebula | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| risingwave | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| starrocks | ✔️ | ✔️ | ✔️ | ✔️ | 
✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| etcd | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| oceanbase | | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| foxlake | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| orioledb | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| oracle-mysql | ✔️ | N/A | ✔️ | ✔️ | ✔️ | ✔️ | N/A | ✔️ | N/A | N/A | N/A | N/A | N/A | +| official-postgresql | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| mysql (Primary-Secondary replication) | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | ✔️ | +| openldap | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| neon | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| opensearch | ✔️ | N/A | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| vllm | N/A | N/A | N/A | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| ggml | | N/A | N/A | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| milvus | ✔️ | N/A | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| elasticsearch | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| tdengine | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| clickhouse | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| PolarDB-X | ✔️ | ✔️ | N/A | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | ✔️ | +| Zookeeper | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | +| MariaDB | ✔️ | N/A | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | diff --git a/docs/en/release-1_0_1/release_notes/v0.8.0/_category_.yml b/docs/en/release-1_0_1/release_notes/v0.8.0/_category_.yml new file mode 100644 index 00000000..555e2cd5 --- /dev/null +++ 
b/docs/en/release-1_0_1/release_notes/v0.8.0/_category_.yml @@ -0,0 +1,4 @@ +position: 2 +label: v0.8.0 +collapsible: true +collapsed: true diff --git a/docs/en/release-1_0_1/release_notes/v0.8.0/v0.8.0.mdx b/docs/en/release-1_0_1/release_notes/v0.8.0/v0.8.0.mdx new file mode 100644 index 00000000..b7356251 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.8.0/v0.8.0.mdx @@ -0,0 +1,101 @@ +# KubeBlocks 0.8.0 (2024-01-12) + +Exciting news! KubeBlocks v0.8.0 is officially released! 🚀 🎉 🎈 + +KubeBlocks v0.8.0 takes a big step forward in user experience by introducing component-level APIs, which makes standardized building blocks even smaller and more convenient to be reused. For example, popular Key-Value store components like ETCD and/or ZK services, which are widely used as state store in various type of database clusters, can be directly referenced. + +We also made Vitess Proxy a standard component, so that developers do not need to repeat the heavy lifting work when defining the read-write separation topology of MySQL or PostgreSQL DB engines. + +Further more, the Addon mechanism has also been significantly improved, as database engines' Helm charts is decoupled from the release stream. + +## Highlights + +### Independent Component API + +When integrating the new database engine, we found deficiencies in the abstract design of KubeBlocks. v0.8.0 splits Component from Cluster definition to better support database types with multiple components. It supports variable references between Components, including ConfigMap, Secret, Service, ServiceReference and other variable reference types, which can better connect the relationships between components and lay the foundation for building clusters with different topologies. + +### Remove the addon helm chart from KubeBlocks repo + +In previous versions, Helm charts for database engines were located in the "deploy" directory, tightly coupled with the KubeBlocks Operator. 
However, this caused two problems: first, upgrading KubeBlocks would trigger the upgrade of the database engine; second, upgrading the database engine would overwrite existing CD/CV, restarting all the clusters. + +To address these problems, KubeBlocks v0.8.0 has placed the database engines into a standalone repository called "kubeblocks-addon", with version numbers added for both the database engines and associated resources. This ensures that new installations do not overwrite existing resources, thereby eliminating cluster restarts. And KubeBlocks provides the "kbcli addon" command, which allows users to download, install, use, and uninstall specific engine versions. + +### Supports multiple versions of database engine definitions + +Prior to v0.8.0, KubeBlocks upgrades might trigger a restart of the database cluster. KubeBlocks v0.8.0, with the new Component API and Addon helm chart storage mechanism, this problem has been solved to a certain extent. We will continue to optimize the design of multiple versions in the future, and ultimately achieve burden-free upgrades. + +## What's Changed + +### New features + +#### Pika + +Supports multiple Pulsar clusters that can share a zookeeper component. [#5823](https://github.com/apecloud/kubeblocks/issues/5823) + + +#### Clickhouse + +Integrates monitoring, scale-out and high availability. [#5784](https://github.com/apecloud/kubeblocks/issues/5784) + + +#### Oceanbase + +Adds new active and standby cluster modes to support the complete life cycle and integrate backup, recovery, monitoring and switching + + +#### MySQL + +- The Community Edition of MySQL 5.7 and 8.0 supports a full lifecycle with integrated backup recovery, monitoring, and HA. + + +- Adds log audit function on ApeCloud MySQL. + + +#### PostgreSQL + +Postgresql supports wal-g full backup and PITR. [#180](https://github.com/apecloud/kubeblocks-addons/pull/180) + + +#### OpsRequest + +Supports custom OpsRequest, enabling specific operational actions. 
For example, creating and deleting Kafka topics. + + +#### NodePort + +Enables NodePort access for Redis. + +### Compatibility + +Compatible with the pre-refactored ClusterDefinition and ClusterVersion APIs. + +### Usability + +- opsRequest supports task queuing, allowing users to submit multiple tasks at once. For tasks that cannot run concurrently, the system will automatically execute the next task after the previous one is completed #5887 + + +- During KubeBlocks installation, specifying the image repository address is now possible, speeding up image pull. + +### Observability + + Unifies configuration and management for logs and metrics collection. + +### API +- Adds interface definition in ComponentDefinition #5031 +- Adds OpsDefinition API #5940 +- Adds PreDelete Action for ActionSet . You can perform this action before deleting backup. #6361 + Stability +- Stability testings and related improvements + +### kbcli +- Enhances addon sub-command, you can install the addon from the index repository. (PROVIDE LINKS FOR HOWTO?) + +## Incompatible changes + +- In KubeBlocks 0.8.0, we have made improvements to Oceanbase (adding functions like creating master/standby clusters, support host network and dynamic ports, support backup/recovery, monitoring, logging, etc.), the clusters created in version 0.7.0 are incompatible with those in version 0.8.0, so if you are using the 0.7.0 version to manage Oceanbase, you are recommended to upgrade to the 0.8.0 version of KubeBlocks. Upgrade KubeBlocks first, then upgrade Oceanbase Addon. It is recommended to use OceanBase official data import and export tools (OBLOADER and OBDUMPER) to migrate data. + + +- KubeBlocks 0.8.0 streamlines the data engines installed by default when deploying KubeBlocks by removing greptime, influxdb, neon, oracle-mysql, oroledb, tdengine, mariadb, nebula, risingwave, starrocks, tidb, zookeeper. 
You can install them on-demand from the addon indexing repository with the kbcli addon subcommand or the kubectl apply command; if you are upgrading from a lower version, follow the upgrade manual to avoid deleting addons that are in use, which may affect running clusters. + + +- The Helm Chart of KubeBlocks 0.8.0 no longer contains dependent CRDs. When installing or upgrading KubeBlocks with the helm command, you need to install the corresponding CRDs before installing or upgrading KubeBlocks, please refer to the upgrade manual for details. diff --git a/docs/en/release-1_0_1/release_notes/v0.9.0/0.9.0.mdx b/docs/en/release-1_0_1/release_notes/v0.9.0/0.9.0.mdx new file mode 100644 index 00000000..a61714bb --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.9.0/0.9.0.mdx @@ -0,0 +1,111 @@ + +# KubeBlocks 0.9.0 (2024-07-09) + +We are thrilled to announce the release of KubeBlocks v0.9.0, which brings us one step closer to the highly anticipated v1.0 release. This version introduces several significant improvements and +new features that enhance the overall functionality and user experience of the KubeBlocks platform. + +## API Highlights + +- In KubeBlocks v0.9, with the introduction of topology support in KubeBlocks, the cluster building experience has become much more flexible and intuitive, akin to assembling a cluster using building blocks. The ClusterDefinition API has added the topologies field, allowing developers to provide various deployment patterns with different topologies. Database users can choose a topology when creating a Cluster through the topology field. For instance, the Redis Addon offers three topologies: Standalone, Replication, and Proxy. The Standalone topology only includes one Component - RedisServer, the Replication topology includes both RedisServer and Sentinel Components, and the Proxy topology adds a third Component, such as Twemproxy. +- KubeBlocks now supports managing horizontal scaling (Reshard) of distributed databases. 
You can represent a horizontal shard with a Component, and scale up or down this horizontal shard by adding or removing Components. This scaling capability will also be used in the distributed deployment of Redis and Pika. +- KubeBlocks now uses InstanceSet instead of StatefulSet to manage Pods. InstanceSet supports taking a specified Pod offline and Pod in-place update, and also the primary and secondary databases can adopt different pod specs in a database Replication architecture (StatefulSet doesn't support these features). +- Developers can add more custom event handlers for Components! The ComponentDefinition API, introduced in v0.8, includes the lifeCycleActions field, allowing you to define various custom event handlers. Building on this, KubeBlocks v0.9 provides more handlers for custom addon implementation, including roleprobe (node role probing), memberLeave (node offline), preTerminate (Component offline), and postProvision (Component online). The expansion of event handlers enhances KubeBlocks' expression capabilities. For example, preTerminate and postProvision can be used to execute cross-shard data redistribution (Rebalance) in distributed databases or initiate registration to third-party HA managers like Sentinel and Orchestrator. +Addon Highlights +- KubeBlocks supports Redis Cluster mode (sharding mode) [#5833](https://github.com/apecloud/kubeblocks/issues/5833) + Redis Cluster is designed to provide horizontal write scalability and intelligent client high-availability strategies, in addition to its excellent failover capability. Redis Cluster distributes data across multiple Redis nodes, significantly enhancing system capacity, performance, and availability. +- KubeBlocks introduces MySQL Replication mode [#1330](https://github.com/apecloud/kubeblocks/issues/1330) + Compared to MGR clusters, the MySQL Replication topology requires fewer resources (only two database replicas) and incurs less overhead for data replication. 
When there are no extreme demands for service availability and data reliability, the Replication topology is a more cost-effective choice. You can actively switch MySQL replica roles using kbcli or trigger a passive failover by deleting specified Kubernetes pods via kubectl. If there are no long transactions or large table DDLs, failover can generally be completed within 30 seconds.
[#6582](https://github.com/apecloud/kubeblocks/pull/6582) +- InstanceSet API + - InstanceSet replaces StatefulSet to manage Pods. [#7084](https://github.com/apecloud/kubeblocks/pull/7084) + - Supports Instance Template. [#5799](https://github.com/apecloud/kubeblocks/issues/5799) + - Supports specified instance scaling down. [#6958](https://github.com/apecloud/kubeblocks/pull/6958) + - Supports In-place Update. [#7000](https://github.com/apecloud/kubeblocks/pull/7000) +- OpsRequest API + - Supports rebuilding faulty backup instances. [#6872](https://github.com/apecloud/kubeblocks/pull/6872) + - Supports the force flag to control concurrency. [#6828](https://github.com/apecloud/kubeblocks/pull/6828) + - Supports custom multi-task sequential execution. [#6735](https://github.com/apecloud/kubeblocks/pull/6735) +- Supports NodeCountScaler. [#7258](https://github.com/apecloud/kubeblocks/pull/7258) +- Supports PITR. [#6779](https://github.com/apecloud/kubeblocks/pull/6779) +- Supports cross-Namespace restore. [#6778](https://github.com/apecloud/kubeblocks/pull/6778) +**kbcli** +- Supports PostgreSQL point-in-time recovery. [#329](https://github.com/apecloud/kbcli/pull/329) +- cluster supports the rebuild-instance subcommand to rebuild instances. [#285](https://github.com/apecloud/kbcli/pull/295) +- cluster create subcommand supports elasticsearch. [#389](https://github.com/apecloud/kbcli/pull/389) +- Supports specifying path prefix when creating a backup repository. [#294](https://github.com/apecloud/kbcli/pull/294) + +**Addons** +*Redis* +- Supports the official Redis Cluster topology. [#301](https://github.com/apecloud/kubeblocks-addons/pull/301) +- Enhances the functionality and stability of Redis. + - Adapts to new APIs such as ComponentDefinition and ComponentVersion, supporting multiple topology forms. [#501](https://github.com/apecloud/kubeblocks-addons/pull/501) + - Optimizes Redis Replication Cluster initialization logic, removing dependency on DownwardAPI. 
[#462](https://github.com/apecloud/kubeblocks-addons/pull/462) [#616](https://github.com/apecloud/kubeblocks-addons/pull/616) + - Supports Redis v7.2.4. [#571](https://github.com/apecloud/kubeblocks-addons/pull/571) +- Redis sharding cluster supports backup and restore. [#442](https://github.com/apecloud/kubeblocks-addons/pull/442) +*MySQL* +- Adds the open-source component Orchestrator Addon to manage MySQL. [#625](https://github.com/apecloud/kubeblocks-addons/pull/625) [#567](https://github.com/apecloud/kubeblocks-addons/pull/567) +*PostgreSQL* +- Supports PostgreSQL PITR. [#361](https://github.com/apecloud/kubeblocks-addons/pull/361) +- Supports PostgreSQL v15.7. [#361](https://github.com/apecloud/kubeblocks-addons/pull/361) +*Qdrant* +- Qdrant sharding cluster supports backup and restore. [#442](https://github.com/apecloud/kubeblocks-addons/pull/442) +*MogDB* +- Supports the creation, scaling, backup and switchover of MogDB Replication Cluster in v5.0.5. [#343](https://github.com/apecloud/kubeblocks-addons/pull/343) [#350](https://github.com/apecloud/kubeblocks-addons/pull/350) +*ElasticSearch* +- Supports Elasticsearch v7.7.1, v7.10.1, and v8.8.2. [#767](https://github.com/apecloud/kubeblocks-addons/pull/767) +*Pulsar* +- Supports v3.0.2. [#340](https://github.com/apecloud/kubeblocks-addons/pull/40) +- Supports NodePort. [#358](https://github.com/apecloud/kubeblocks-addons/pull/358) +*VictoriaMetrics* +- Supports VictoriaMetrics v1.100.1. [#479](https://github.com/apecloud/kubeblocks-addons/pull/479) + +## API deprecations, and other changes for Release 0.9 + +- ConfigConstraint API becomes stable and upgrades from v1alpha1 to v1beta1. +- The group of StorageProvider changes and is migrated from storage.kubeblocks.io to dataprotection.kubeblocks.io. +- ClusterVersion v1alpha1 CRD will be removed in Release 1.0. +- ComponentClassDefinition v1alpha1 CRD will be removed in Release 1.0. +- ComponentResourceConstraint v1alpha1 CRD will be removed in Release 1.0. 
+- ClusterDefinition API + - type, componentDefs, connectionCredential will be removed in Release 1.0. +- Cluster API + - Scheduling: tenancy and availabilityPolicy will be removed in Release 1.0. + - API simplification: replicas, resources, storage, and network will be removed in Release 1.0. +- ComponentDefinition API + - switchPolicy will be removed in Release 1.0. The same capability can be achieved using the componentDefinition.spec.lifecycleActions.switchover API. +- ServiceRef API + - Cluster will be removed in Release 1.0. The same capability can be achieved using serviceRef.clusterServiceSelector. + +Additionally, all fields referencing the above APIs are also marked as deprecated and will be removed in Release 1.0. +- clusterVersionRef +- componentDefRef +- classDefRef +KubeBlocks Release 0.9 still maintains the compatibility with API marked as deprecated. +Deprecated Features +kbcli Deprecated features +- bench, fault, and migration subcommands are removed to streamline functions. + + +## Upgrade to v0.9 +Refer to [Upgrade to KubeBlocks v0.9](../../user_docs/upgrade/upgrade-to-0_9_0). 
+ diff --git a/docs/en/release-1_0_1/release_notes/v0.9.0/_category_.yml b/docs/en/release-1_0_1/release_notes/v0.9.0/_category_.yml new file mode 100644 index 00000000..29e856b5 --- /dev/null +++ b/docs/en/release-1_0_1/release_notes/v0.9.0/_category_.yml @@ -0,0 +1,4 @@ +position: 1 +label: v0.9.0 +collapsible: true +collapsed: true diff --git a/docs/en/release-1_0_1/user_docs/concepts/_category_.yml b/docs/en/release-1_0_1/user_docs/concepts/_category_.yml new file mode 100644 index 00000000..e2295dd6 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/_category_.yml @@ -0,0 +1,4 @@ +position: 2 +label: Concepts and Features +collapsible: true +collapsed: true diff --git a/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/_category_.yaml b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/_category_.yaml new file mode 100644 index 00000000..8652fa87 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/_category_.yaml @@ -0,0 +1,4 @@ +position: 3 +label: Backup and Restore +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/_category_.yaml b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/_category_.yaml new file mode 100644 index 00000000..25665341 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/_category_.yaml @@ -0,0 +1,4 @@ +position: 3 +label: Backup +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/backup-repo.mdx b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/backup-repo.mdx new file mode 100644 index 00000000..9e0c32f3 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/backup-repo.mdx @@ -0,0 +1,648 @@ +--- +title: Configure BackupRepo +description: How to configure BackupRepo +keywords: [introduction, 
backup, restore] +sidebar_position: 1 +sidebar_label: Configure BackupRepo +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Introduction + +BackupRepo is the storage repository for backup data. Currently, KubeBlocks supports configuring various object storage services as backup repositories, including OSS (Alibaba Cloud Object Storage Service), S3 (Amazon Simple Storage Service), COS (Tencent Cloud Object Storage), GCS (Google Cloud Storage), OBS (Huawei Cloud Object Storage), Azure Blob Storage, MinIO, and other S3-compatible services. + +You can create multiple BackupRepos to suit different scenarios. For example, based on different businesses, the data of business A is stored in repository A, and the data of business B is stored in repository B. Or you can configure multiple repositories by region to realize geo-disaster recovery. But it is required to specify backup repositories when you create a backup. You can also create a default backup repository and KubeBlocks uses this default repository to store backup data if no specific repository is specified. + +## Before you start + +Make sure you have all the following prepared. + +* [Install kbcli](./../../../references/install-kbcli). +* [Install kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl). +* [Install Helm](https://helm.sh/docs/intro/install/). +* [Install KubeBlocks](./../../../overview/install-kubeblocks). + +## Configure BackupRepo + +With object storage services prepared, it's time to configure BackupRepo. KubeBlocks provides two ways for the configuration: + +* Automatic BackupRepo configuration during KubeBlocks installation; +* Manual BackupRepo configuration for on-demand scenarios. + +:::tip + +If you don't have an object storage service from a cloud provider, you can deploy the open-source service MinIO in Kubernetes following the [Install MinIO](../../../references/install-minio) guide. 
+ +::: + + +### Access BackupRepo + +There are two methods to access remote object storage: + +| Method | Description | Requirements | Security Consideration | +|--------|-------------|--------------|------------------------| +| Tool | Uses command-line tools to directly access remote storage | No additional driver required | Synchronizes credentials as secrets across namespaces | +| Mount | Mounts remote storage locally using a CSI driver | Requires CSI driver installation | No credential sharing between namespaces | + +The access method is specified in the `accessMethod` field when creating a BackupRepo and cannot be changed later. + +**Recommendation**: +- Use "Tool" method for simpler setups in trusted environments +- Use "Mount" method for enhanced security in multi-tenant scenarios + +### Manual BackupRepo configuration + +If you do not configure the BackupRepo information when installing KubeBlocks, you can manually configure it by the following instructions. + + + + + +1. Install the S3 CSI driver (only used in the Mount method). + + ```bash + helm repo add yandex-s3 https://yandex-cloud.github.io/k8s-csi-s3/charts + + helm install csi-s3 yandex-s3/csi-s3 -n kb-system + ``` + For more information, please refer to [Yandex Cloud CSI S3 Driver](https://github.com/yandex-cloud/k8s-csi-s3). + +2. Create BackupRepo. 
+ + + + + + ```bash + # Create a secret to save the access key for S3 + kubectl create secret generic s3-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # Create the BackupRepo resource + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: s3 + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + endpoint: "" + mountOptions: --memory-limit 1000 --dir-mode 0777 --file-mode 0666 + region: cn-northwest-1 + credential: + name: s3-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # Create a secret to save the access key for OSS + kubectl create secret generic oss-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # Create the BackupRepo resource + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: oss + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + mountOptions: "" + endpoint: "" + region: cn-zhangjiakou + credential: + name: oss-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # Create a secret to save the access key for OBS + kubectl create secret generic obs-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # Create the BackupRepo resource + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + 
dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: obs + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + mountOptions: "" + endpoint: "" + region: cn-north-4 + credential: + name: obs-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # Create a secret to save the access key for COS + kubectl create secret generic cos-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # Create the BackupRepo resource + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: cos + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + mountOptions: "" + endpoint: "" + region: ap-guangzhou + credential: + name: cos-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # Create a secret to save the access key for GCS + kubectl create secret generic gcs-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # Create the BackupRepo resource + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: gcs-s3comp + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + mountOptions: "" + endpoint: "" + region: auto + credential: + name: gcs-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # Create a secret to save the access key for MinIO + kubectl create secret generic minio-credential-for-backuprepo \ 
+ -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # Create the BackupRepo resource + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: minio + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + mountOptions: "" + endpoint: + credential: + name: minio-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # Create a secret to save the access key for the S3 compatible storage + kubectl create secret generic s3-comp-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # Create the BackupRepo resource + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: s3-compatible + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + endpoint: + forcePathStyle: true + credential: + name: s3-comp-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # Create a secret to save the access key for the Azure Blob Storage + kubectl create secret generic azureblob-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accountName= \ + --from-literal=accountKey= + + # Create the BackupRepo resource + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: azureblob + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + container: test-kb-backup + 
credential: + name: azureblob-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + +3. View the BackupRepo and its status. If the status is `Ready`, the BackupRepo is ready. + + ```bash + kubectl get backuprepo + ``` + + + + + +1. Install the S3 CSI driver (only used in the Mount method). + + ```bash + # Enable the CSI-S3 addon + kbcli addon enable csi-s3 + + # You can add flags to customize the installation of this addon + # CSI-S3 installs a daemonSet Pod on all nodes by default and you can set tolerations to install it on the specified node + kbcli addon enable csi-s3 \ + --tolerations '[{"key":"taintkey","operator":"Equal","effect":"NoSchedule","value":"true"}]' \ + --tolerations 'daemonset:[{"key":"taintkey","operator":"Equal","effect":"NoSchedule","value":"true"}]' + + # View the status of CSI-S3 driver and make sure it is Enabled + kbcli addon list csi-s3 + ``` + +2. Create BackupRepo. + + + + + + ```bash + kbcli backuprepo create my-repo \ + --provider s3 \ + --region cn-northwest-1 \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + You can also specify `--access-method` as `Mount`. + + + + + + ```bash + kbcli backuprepo create my-repo \ + --provider oss \ + --region cn-zhangjiakou \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + You can also explicitly specify the OSS endpoint using the `--endpoint` flag. 
For example, + + ```bash + kbcli backuprepo create my-repo \ + --provider oss \ + --region cn-zhangjiakou \ + --bucket test-kb-backup \ + --endpoint https://oss-cn-zhangjiakou-internal.aliyuncs.com \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + + + + + ```bash + kbcli backuprepo create my-repo \ + --provider obs \ + --region cn-north-4 \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + + + + + For COS, the naming format of a bucket is ``,where APPID is automatically generated by Tencent Cloud. When setting `--bucket`, first create the bucket on the Tencent Cloud console and retrieve the bucket name. + + ```bash + kbcli backuprepo create my-repo \ + --provider cos \ + --region ap-guangzhou \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + + + + + ```bash + kbcli backuprepo create my-repo \ + --provider gcs-s3comp \ + --region auto \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + GCS supported by KubeBlocks is the S3-compatible version provided by Google Cloud. + + + + + + ```bash + kbcli backuprepo create my-repo \ + --provider minio \ + --endpoint \ + --bucket test-minio \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + The address for the deployed MinIO is http://minio.kb-system.svc.cluster.local:9000. 
+
+
+
+
+ ```bash
+ kbcli backuprepo create my-repo \
+ --provider s3-compatible \
+ --endpoint \
+ --bucket test-minio \
+ --access-key-id \
+ --secret-access-key \
+ --access-method Tool \
+ --force-path-style=true \
+ --default
+ ```
+
+
+
+
+
+ ```bash
+ kbcli backuprepo create my-repo \
+ --provider azureblob \
+ --container test-kb-backup \
+ --azure-account-name \
+ --azure-account-key \
+ --access-method Tool \
+ --default
+ ```
+
+
+
+
+
+ The above command creates a default backup repository `my-repo`.
+
+ * `my-repo` is the name of the created backup repository. If you do not specify a name, the system creates a random name, following the format `backuprepo-xxxxx`.
+ * `--default` means that this repository is set as the default repository. Note that there can only be one default global repository. If there exist multiple default repositories, KubeBlocks cannot decide which one to use (similar to the default StorageClass of Kubernetes), which further results in backup failure. Using kbcli to create BackupRepo can avoid such problems because kbcli checks whether there is another default repository before creating a new one.
+ * `--provider` specifies the storage type, i.e. `storageProvider`, and is required for creating a BackupRepo. The available values are `s3`, `cos`, `gcs-s3comp`, `obs`, `oss`, `azureblob`, `minio`, `s3-compatible`, `ftp`, and `nfs`. Parameters for different storage providers vary and you can run `kbcli backuprepo create --provider STORAGE-PROVIDER-NAME -h` to view the flags for different storage providers. Please note that `--provider` is mandatory in configuration.
+
+ After `kbcli backuprepo create` is executed successfully, the system creates the K8s resource whose type is `BackupRepo`. You can modify the annotation of this resource to adjust the default repository.
+ + ```bash + # Cancel the default repository + kubectl annotate backuprepo old-default-repo \ + --overwrite=true \ + dataprotection.kubeblocks.io/is-default-repo=false + ``` + + ```bash + # Set a new default repository + kubectl annotate backuprepo backuprepo-4qms6 \ + --overwrite=true \ + dataprotection.kubeblocks.io/is-default-repo=true + ``` + +3. View the BackupRepo and its status. If the status is `Ready`, the BackupRepo is ready. + + ```bash + kbcli backuprepo list + ``` + + + + + +:::note + +If the BackupRepo status shows Failed or remains in PreChecking for a long time, run `kubectl describe backuprepo my-repo` or `kbcli backuprepo describe my-repo` to check the `status.conditions` for details. + +To troubleshoot: + +* Check whether configuration parameters, such as `endpoint`, `accessKeyId`, and `secretAccessKey`, are correctly specified. +* For self-hosted object storage (e.g., Ceph Object Storage), try using `s3-compatible` as StorageProvider. The default `s3` StorageProvider uses a virtual hosting URL style, which some self-hosted storage may not support. +* If an `InvalidLocationConstraint` error occurs, check whether its parameter is correctly configured. If this error persists, leave the `region` parameter empty and try again. +* If the status remains in the `PreChecking` state, check your network connection. Ensure the storage service is accessible from within the Kubernetes cluster. You can test this by running a Pod and connecting to the storage service using the corresponding client. +* KubeBlocks uses [rclone](https://rclone.org/) internally for data transfer. Check whether rclone can successfully access the storage service. + +::: + +### Automatic BackupRepo configuration + +You can specify the BackupRepo information in a YAML configuration file when installing KubeBlocks, and KubeBlocks will create the BackupRepo accordingly. + +1. Prepare the configuration file. 
+ + Taking AWS S3 as an example, the configuration file `backuprepo.yaml` is: + + ```yaml + backupRepo: + create: true + storageProvider: s3 + config: + region: cn-northwest-1 + bucket: test-kb-backup + secrets: + accessKeyId: + secretAccessKey: + ``` + + * `region`: specifies the region where S3 is located. + * `bucket`: specifies the bucket name of S3. + * `accessKeyId`: specifies the Access Key of AWS. + * `secretAccessKey`: specifies the Secret Key of AWS. + * `storageProvider`:specifies the object storage provider, which is S3 in this case. + +:::note + +* In KubeBlocks, the available `storageProvider` options are `s3`, `cos`, `gcs-s3comp`, `obs`, `oss`, `azureblob`, `minio`, `s3-compatible`, `ftp`, and `nfs`. +* For different `storageProvider`, the configuration may differ. `config` and `secrets` in the above example are applied to S3. +* Execute the command `kubectl get storageproviders.dataprotection.kubeblocks.io` to view the supported `storageProvider` options. + +::: + +2. Specify the configuration file when installing KubeBlocks. + + + + + + ```bash + kubectl create -f backuprepo.yaml + ``` + + Use the command below to check the BackupRepo after installation. + + ```bash + kubectl get backuprepo + ``` + + + + + + ```bash + kbcli kubeblocks install -f backuprepo.yaml + ``` + + Use the command below to check the BackupRepo after installation. 
+ + ```bash + kbcli backuprepo list + ``` + + + + \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/configure-backuppolicy.mdx b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/configure-backuppolicy.mdx new file mode 100644 index 00000000..21a44329 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/configure-backuppolicy.mdx @@ -0,0 +1,172 @@ +--- +title: Configure BackupPolicy +description: How to configure BackupPolicy +keywords: [backup, backup policy] +sidebar_position: 2 +sidebar_label: Configure BackupPolicy +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Configure BackupPolicy + +## Configure encryption key + +To ensure that the restored cluster can access the data properly, KubeBlocks encrypts the cluster's credentials during the backup process and securely stores it in the Annotation of the Backup object. Therefore, to protect your data security, it is strongly recommended to carefully assign Get/List permissions for backup objects and specify an encryption key during the installation or upgrade of KubeBlocks. These measures will help ensure the proper protection of your data. + +KubeBlocks has integrated data encryption functionality for datasafed since v0.9.0. Currently, the supported encryption algorithms include `AES-128-CFB`, `AES-192-CFB`, and `AES-256-CFB`. This function allows backup data to be encrypted before being written to storage. The encryption key then will be used to encrypt connection passwords and also to back up data. You can reference existing keys or create different secret keys for database clusters according to actual needs. + +### Reference an existing key + +If the secret already exists, you can choose to directly reference it without setting the `dataProtection.encryptionKey`. KubeBlocks provides a quick way to reference an existing key for encryption. 
+ +Assuming there is a pre-defined secret named `dp-encryption-key` and a key `encryptionKey` inside it. For example, a secret created by this command. + +```bash +kubectl create secret generic dp-encryption-key \ + --from-literal=encryptionKey='S!B\*d$zDsb=' +``` + +And then you can reference it when installing or upgrading KubeBlocks. + +```bash +kbcli kubeblocks install \ + --set dataProtection.encryptionKeySecretKeyRef.name="dp-encryption-key" \ + --set dataProtection.encryptionKeySecretKeyRef.key="encryptionKey" +# The above command is equivalent to: +# kbcli kubeblocks install --set dataProtection.encryptionKey='S!B\*d$zDsb=' +``` + +### Create a new key + +If you do not need to enable backup encryption by default, or if you need to use a separate `encryptionKey`, just create a Secret and manually enable backup encryption by following the steps below. + +1. Create a Secret to store the encryption key. + + ```bash + kubectl create secret generic backup-encryption \ + --from-literal=secretKey='your secret key' + ``` + +2. Enable encryption. + + Remember to reference the key created before. + + ```bash + kubectl --type merge patch backuppolicy mysqlcluster-mysql-backup-policy \ + -p '{"spec":{"encryptionConfig":{"algorithm":"AES-256-CFB","passPhraseSecretKeyRef":{"name":"backup-encryption","key":"secretKey"}}}}' + ``` + +:::note + +You can also use `kbcli` to simplify the process. + +```bash +# enable encryption +kbcli cluster edit-backup-policy --set encryption.algorithm=AES-256-CFB --set encryption.passPhrase="SECRET!" + +# disable encryption +kbcli cluster edit-backup-policy --set encryption.disabled=true +``` + +::: + +Now you can perform backups and restores as usual. + +:::note + +The secret created in Step 1 should not be modified or deleted; otherwise, decryption of backups may fail. 
+
+:::
+
+By default, the `encryptionKey` is only used for encrypting the connection password. If you want to use it to encrypt backup data as well, add `--set dataProtection.enableBackupEncryption=true` to the above command. After that, all newly-created clusters are enabled for backup encryption by default.
+
+## Create a cluster
+
+Prepare a cluster for testing the backup and restore function. The following instructions use the MySQL cluster `mycluster` in the default namespace as an example.
+
+```shell
+# Create a MySQL cluster
+kbcli cluster create mysql mycluster
+
+# View backupPolicy
+kbcli cluster list-backup-policies mycluster
+>
+NAME NAMESPACE DEFAULT CLUSTER COMPONENT CREATE-TIME STATUS
+mycluster-mysql-backup-policy default true mycluster mysql May 26,2025 18:11 UTC+0800 Available
+```
+
+By default, all the backups are stored in the default global repository. You can execute the following command to view all BackupRepos. When the `DEFAULT` field is `true`, the BackupRepo is the default BackupRepo.
+
+```bash
+# View BackupRepo
+kbcli backuprepo list
+```
+
+## View BackupPolicy
+
+After creating a database cluster, a BackupPolicy is created automatically for databases that support backup. Execute the following command to view the BackupPolicy of the cluster.
+
+
+
+
+
+```bash
+kubectl get backuppolicy -l app.kubernetes.io/instance=mycluster
+>
+NAME BACKUP-REPO STATUS AGE
+mycluster-mysql-backup-policy Available 83s
+```
+
+
+
+
+
+```bash
+kbcli cluster list-backup-policies mycluster
+>
+NAME NAMESPACE DEFAULT CLUSTER COMPONENT CREATE-TIME STATUS
+mycluster-mysql-backup-policy default true mycluster mysql May 26,2025 18:11 UTC+0800 Available
+```
+
+
+
+
+
+The backup policy includes the backup methods supported by the cluster. Execute the following command to view the backup methods.
+ + + + + +```bash +kubectl get backuppolicy mycluster-mysql-backup-policy -o yaml +``` + + + + + +```bash +kbcli cluster describe-backup-policy mycluster +> +Summary: + Name: mycluster-mysql-backup-policy + Cluster: mycluster + Component: mysql + Namespace: default + Default: true + +Backup Methods: +NAME ACTIONSET SNAPSHOT-VOLUMES +xtrabackup mysql-xtrabackup-br false +volume-snapshot mysql-volume-snapshot true +archive-binlog mysql-pitr false +``` + + + + + +For a MySQL cluster, two default backup methods are supported: `xtrabackup` and `volume-snapshot`. The former uses the backup tool `xtrabackup` to backup MySQL data to an object storage, while the latter utilizes the volume snapshot capability of cloud storage to backup data through snapshots. When creating a backup, you can specify which backup method to use. diff --git a/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/on-demand-backup.mdx b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/on-demand-backup.mdx new file mode 100644 index 00000000..d8b39bb9 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/on-demand-backup.mdx @@ -0,0 +1,138 @@ +--- +title: On-demand backup +description: How to back up databases on-demand by snapshot and backup tool +keywords: [backup, on-demand backup, snapshot backup, backup tool] +sidebar_position: 4 +sidebar_label: On-demand backup +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# On-demand backup + +KubeBlocks supports on-demand backups. You can customize your backup method by specifying `--method`. The instructions below take using a backup tool and volume snapshot as examples. + +## Backup tool + +The following command uses the `xtrabackup` backup method to create a backup named `mybackup`. 
+
+
+
+
+To create a backup:
+```bash
+kubectl apply -f - <
+NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME
+mybackup mycluster-mysql-backup-policy xtrabackup kb-oss Completed 1632402 10s Delete 2025-05-26T10:14:33Z 2025-05-26T10:14:42Z
+```
+
+
+
+Create a backup
+```bash
+kbcli cluster backup mycluster --name mybackup --method xtrabackup
+>
+Backup mybackup created successfully, you can view the progress:
+ kbcli cluster list-backups --names=mybackup -n default
+```
+
+View the backup
+```bash
+kbcli cluster list-backups --names mybackup
+>
+NAME NAMESPACE SOURCE-CLUSTER METHOD STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATE-TIME COMPLETION-TIME EXPIRATION
+mybackup default mycluster xtrabackup Completed 1632402 10s Delete May 26,2025 18:14 UTC+0800 May 26,2025 18:14 UTC+0800
+```
+
+
+
+
+
+## Volume snapshot backup
+
+:::note
+**Prerequisites**
+Volume snapshot backups require:
+- StorageClass must support volume snapshots
+
+Please check the list of CSI Drivers and their features at: https://kubernetes-csi.github.io/docs/drivers.html
+
+:::
+
+To create a backup using the snapshot, the `backupMethod` in the YAML configuration file or the `--method` field in the kbcli command should be set to `volume-snapshot`.
+ + + + + +```bash +# Create a backup +kubectl apply -f - < +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +my-snapshot-backup mycluster2-mysql-backup-policy volume-snapshot Running Delete 2025-05-26T10:30:10Z +``` + + + + + +```bash +# Create a backup +kbcli cluster backup mycluster --name my-snapshot-backup --method volume-snapshot +> +Backup mybackup created successfully, you can view the progress: + kbcli cluster list-backups --names=mybackup -n default + +# View the backup +kbcli cluster list-backups --names=my-snapshot-backup -n default +> +NAME NAMESPACE SOURCE-CLUSTER METHOD STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATE-TIME COMPLETION-TIME EXPIRATION +my-snapshot-backup default mycluster volume-snapshot Running Delete May 26,2025 18:30 UTC+0800 +``` + + + + + +:::caution + +1. When creating backups using snapshots, ensure that the storage used supports the snapshot feature; otherwise, the backup may fail. + +2. Backups created manually using `kbcli` will not be automatically deleted. You need to manually delete them. + +::: diff --git a/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/scheduled-backup.mdx b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/scheduled-backup.mdx new file mode 100644 index 00000000..76a13563 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/backup/scheduled-backup.mdx @@ -0,0 +1,86 @@ +--- +title: Scheduled backup +description: How to back up databases by schedule +keywords: [backup and restore, schedule, automatic backup, scheduled backup] +sidebar_position: 3 +sidebar_label: Scheduled backup +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Scheduled backup + +KubeBlocks supports configuring scheduled backups for clusters. + + + + + +Modify the backup field with kubectl as follows. 
+
+```bash
+kubectl edit cluster -n default mycluster
+```
+
+Edit the cluster YAML.
+
+```yaml
+spec:
+ ...
+ backup:
+ # Whether to enable automatic backups
+ enabled: true
+ # UTC timezone, the example below stands for 2 A.M. every day (UTC+8)
+ cronExpression: 0 18 * * *
+ # Use xtrabackup for backups. If your storage supports snapshot, you can change it to volume-snapshot
+ method: xtrabackup
+ # Whether to enable PITR
+ pitrEnabled: false
+ # Retention period for a backup set
+ retentionPeriod: 7d
+ # BackupRepo
+ repoName: my-repo
+```
+
+In the above YAML file, you can set whether to enable automatic backups and PITR as needed, and also specify backup methods, repo names, retention periods, etc.
+
+
+
+
+
+```bash
+kbcli cluster update mycluster --backup-enabled=true \
+--backup-method=xtrabackup --backup-repo-name=my-repo \
+--backup-retention-period=7d --backup-cron-expression="0 18 * * *"
+```
+
+- `--backup-enabled` indicates whether to enable scheduled backups.
+- `--backup-method` specifies the backup method. You can use the `kbcli cluster describe-backup-policy mycluster` command to view the supported backup methods.
+- `--backup-repo-name` specifies the name of the BackupRepo.
+- `--backup-retention-period` specifies the retention period for backups, which is 7 days in the example.
+- `--backup-cron-expression` specifies the backup schedule using a cron expression in UTC timezone. Refer to [cron](https://en.wikipedia.org/wiki/Cron) for the expression format.
+
+
+
+
+
+After the scheduled backup is enabled, execute the following command to check if a CronJob object has been created:
+
+```bash
+kubectl get cronjob
+>
+NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
+96523399-mycluster-default-xtrabackup 0 18 * * * False 0 57m
+```
+
+You can also execute the following command to view cluster information, where the `Data Protection:` section displays the configuration details of automatic backups.
+
+```bash
+kbcli cluster describe mycluster
+>
+...
+Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION +my-repo Enabled 0 18 * * * xtrabackup 7d +``` diff --git a/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/introduction.mdx b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/introduction.mdx new file mode 100644 index 00000000..6ee51625 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/introduction.mdx @@ -0,0 +1,31 @@ +--- +title: Introduction +description: Introduction of KubeBlocks backup and restore functions +keywords: [introduction, backup, restore] +sidebar_position: 1 +sidebar_label: Introduction +--- + +# Introduction + +KubeBlocks provides the backup and restore function to ensure the safety and reliability of your data. The backup and restore function of KubeBlocks relies on BackupRepo and before using the full backup and restore function, you need to [configure BackupRepo first](./backup/backup-repo.md). + +KubeBlocks adopts physical backup which takes the physical files in a database as the backup object. You can choose one backup option based on your demands to back up the cluster data on demand or by schedule. + +* [On-demand backup](./backup/on-demand-backup.md): Based on different backup options, on-demand backup can be further divided into backup tool and snapshot backup. + * Backup tool: You can use the backup tool of the corresponding data product, such as MySQL XtraBackup and PostgreSQL pg_basebackup. KubeBlocks supports configuring backup tools for different data products. + * Snapshot backup: If your data is stored in a cloud disk that supports snapshots, you can create a data backup by snapshots. Snapshot backup is usually faster than a backup tool, and thus is recommended. + +* [Scheduled backup](./backup/scheduled-backup.md): You can specify retention time, backup method, time, and other parameters to customize your backup schedule. 
+ +As for the restore function, KubeBlocks supports restoring data from the backup set. + +* Restore + * [Restore data from the backup set](./restore/restore-data-from-backup-set.md) + +Follow the steps below to back up and restore your cluster. + +1. [Configure BackupRepo](./backup/backup-repo.md). +2. [Configure BackupPolicy](./backup/configure-backuppolicy.md). +3. Backup your cluster [on demand](./backup/on-demand-backup.md) or [by schedule](./backup/scheduled-backup.md). +4. Restore your data by [PITR](./restore/pitr.md) or from the [backup set](./restore/restore-data-from-backup-set.md). diff --git a/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/restore/_category_.yaml b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/restore/_category_.yaml new file mode 100644 index 00000000..027fc711 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/restore/_category_.yaml @@ -0,0 +1,4 @@ +position: 3 +label: Restore +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/restore/pitr.mdx b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/restore/pitr.mdx new file mode 100644 index 00000000..2cc3b746 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/restore/pitr.mdx @@ -0,0 +1,115 @@ +--- +title: PITR +description: How to perform PITR +keywords: [backup and restore, restore, PITR, postgresql] +sidebar_position: 2 +sidebar_label: Point-in-Time Recovery +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# PITR + +## What is PITR (Point-in-Time Recovery) + +PITR (Point-in-Time Recovery) is a database backup and recovery technique commonly used in Relational Database Management Systems (RDBMS). It allows for the recovery of data changes to a specific point in time, restoring the database to a state prior to that point. 
In PITR, the database system regularly creates full backups and logs all transactions thereafter, including insert, update, and delete operations. During recovery, the system first restores the most recent full backup, and then applies the transaction logs recorded after the backup, bringing the database back to the desired state. + +KubeBlocks supports PITR for databases such as MySQL and PostgreSQL. This documentation takes PostgreSQL PITR as an example. Please refer to [PostgreSQL Backup and Restore](../../../../kubeblocks-for-postgresql/05-backup-restore/06-restore-with-pitr) for more details. + +## How to perform PITR? + +**Step 1. View the timestamps to which the cluster can be restored.** + + + + + +```bash +# Get the backup time range for Continuous Backup +kubectl get backup -l app.kubernetes.io/instance=pg-cluster -l dataprotection.kubeblocks.io/backup-type=Continuous -oyaml +... +status: + timeRange: + end: "2024-05-07T10:47:14Z" + start: "2024-05-07T10:07:45Z" +``` + +It can be seen that the current backup time range is `2024-05-07T10:07:45Z ~2024-05-07T10:47:14Z`. Still, a full backup is required for data restoration, and this full backup must be completed within the time range of the log backups. + + + + + +```bash +kbcli cluster describe pg-cluster +> +... +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME +minio Enabled */5 * * * * archive-wal 8d May 07,2024 15:29:46 UTC+0800 ~ May 07,2024 15:48:47 UTC+0800 +``` + +`RECOVERABLE-TIME` represents the time range within which the cluster can be restored. + +It can be seen that the current backup time range is `May 07,2024 15:29:46 UTC+0800 ~ May 07,2024 15:48:47 UTC+0800`. Still, a full backup is required for data restoration, and this full backup must be completed within the time range of the log backups. + + + + + +**Step 2. 
Restore the cluster to a specific point in time.**
+
+
+
+
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: OpsRequest
+metadata:
+ name: pg-cluster-pitr
+spec:
+ clusterName: pg-cluster-pitr
+ restore:
+ backupName: 818aa0e0-pg-kubeblocks-cloud-n-archive-wal
+ restorePointInTime: "2024-05-07T10:07:45Z"
+ volumeRestorePolicy: Parallel
+ type: Restore
+```
+
+
+
+
+
+```bash
+kbcli cluster restore pg-cluster-pitr --restore-to-time 'May 07,2024 15:48:47 UTC+0800' --backup <continuousBackupName>
+```
+
+
+
+
+
+**Step 3. Check the status of the new cluster.**
+
+
+
+
+
+```bash
+kubectl get cluster pg-cluster-pitr
+```
+
+
+
+
+
+```bash
+kbcli cluster list pg-cluster-pitr
+```
+
+
+
+
+
+Once the status turns to `Running`, it indicates a successful operation.
diff --git a/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/restore/restore-data-from-backup-set.mdx b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/restore/restore-data-from-backup-set.mdx
new file mode 100644
index 00000000..6b14110d
--- /dev/null
+++ b/docs/en/release-1_0_1/user_docs/concepts/backup-and-restore/restore/restore-data-from-backup-set.mdx
@@ -0,0 +1,59 @@
+---
+title: Restore data from backup set
+description: How to restore data from backup set
+keywords: [backup and restore, restore, backup set]
+sidebar_position: 1
+sidebar_label: Restore from backup set
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Restore data from backup set
+
+KubeBlocks supports restoring clusters from backups. This documentation takes MySQL as an example. Please refer to [MySQL Backup and Restore](../../../../kubeblocks-for-mysql/05-backup-restore/05-restoring-from-full-backup) for more details.
+
+**Step 1. 
View backups.** + + + + + +```bash +kubectl get backups +``` + + + + + +For existing clusters, execute: + +```bash +kbcli cluster list-backups mycluster +``` + +If the cluster has been deleted, execute: + +```bash +kbcli dataprotection list-backups +``` + + + + + +**Step 2. Restore clusters from a specific backup.** + +```bash +# Restore new cluster +kbcli cluster restore myrestore --backup mybackup +> +Cluster myrestore created + +# View the status of the restored cluster +kbcli cluster list myrestore +> +NAME NAMESPACE CLUSTER-DEFINITION TERMINATION-POLICY STATUS CREATED-TIME +myrestore default mysql Delete Running May 26,2025 18:42 UTC+0800 +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/concepts/concept.mdx b/docs/en/release-1_0_1/user_docs/concepts/concept.mdx new file mode 100644 index 00000000..4fcec6ee --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/concept.mdx @@ -0,0 +1,160 @@ +--- +title: Concepts +description: KubeBlocks, CRD +keywords: [kubernetes operator, kubeblocks, database, concepts] +sidebar_position: 1 +--- + +# Concepts + +You've already seen the benefits of using unified APIs to represent various databases in the section ["How Unified APIs Reduce Your Learning Curve"](./../overview/introduction). If you take a closer look at those examples, you'll notice two key concepts in the sample YAML files: **Cluster** and **Component**. For instance, `test-mysql` is a Cluster that includes a Component called `mysql` (with a componentDef of `apecloud-mysql`). Similarly, `test-redis` is also a Cluster, and it includes two Components: one called `redis` (with a componentDef of `redis-7`), which has two replicas, and another called `redis-sentinel` (with a componentDef of `redis-sentinel`), which has three replicas. + +In this document, we will explain the reasons behind these two concepts and provide a brief introduction to the underlying API (i.e., CRD). 
+ +## Motivation of KubeBlocks’ Layered API +In KubeBlocks, to support the management of various databases through a unified API, we need to abstract the topologies and characteristics of different databases. + +We’ve observed that database systems deployed in production environments often use a topology composed of multiple components. For example, a production MySQL cluster might include several Proxy nodes (such as ProxySQL, MaxScale, Vitess, WeScale, etc.) alongside multiple MySQL server nodes (like MySQL Community Edition, Percona, MariaDB, ApeCloud MySQL, etc.) to achieve higher availability and read-write separation. Similarly, Redis deployments typically consist of a primary node and multiple read replicas, managed for high availability via Sentinel. Some users even use twemproxy for horizontal sharding to achieve greater capacity and throughput. + +This modular approach is even more pronounced in distributed database systems, where the entire system is divided into distinct components with clear and singular responsibilities, such as data storage, query processing, transaction management, logging, and metadata management. These components interact over the network to ensure strong consistency and transactional guarantees similar to those of a single-node database, enabling complex operations such as load balancing, distributed transactions, and disaster recovery with failover capabilities. + +So KubeBlocks employs a design of layered API (i.e. CRDs), consisting of **Cluster** and **Component**, to accommodate the multi-component and highly variable deployment topology of database systems. These abstractions allow us to flexibly represent and manage the diverse and dynamic topologies of database systems when deployed on Kubernetes, and to easily assemble Components into Clusters with the chosen topology. + +Components serve as the building blocks of a Cluster. 
Actually Addon developers can define how multiple Components are assembled into different topologies within the ClusterDefinition (But wait, does that sound complicated? If you're not an Addon developer, you don't need to worry about the details of ClusterDefinition; you just need to know that Addons can provide different topologies for you to choose from). For example, the Redis Addon provides three topologies: "standalone" "replication" and "replication-twemproxy". Users can specify the desired topology when creating a Cluster. +Here is an example that creates a Redis Cluster with `clusterDef` and `topology`: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: test-redis-use-topology + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +If you have a sharp eye, you'll notice that by specifying `clusterDef` and `topology` in Cluster, you no longer need to specify `componentDef` for each Component. + +Lastly, here’s an interesting fact: do you know why this project is called KubeBlocks? You see, through the Component API, we package database containers into standardized building blocks that can be assembled into a database Cluster according to the specified topology and run on Kubernetes. We think this process feels a lot like building with Lego blocks. 
+ +## Take a closer look at the KubeBlocks API + +The major KubeBlocks CRDs are illustrated in the diagram below. We have specifically highlighted the layered structure of the API. Other important APIs, such as OpsRequest, Backup, and Restore, are not shown in this diagram. They were omitted to keep the focus on the layering, making the diagram clearer. We will explain these additional APIs in other documents. + +![KubeBlocks API Layers](/img/docs/en/kubeblocks_api_layers.png) + +KubeBlocks' CRDs can be categorized into two major classes: those for users and those for Addons. + +**CRDs for users** + +The CRDs for users include Cluster, Component, and InstanceSet. When creating a database cluster with KubeBlocks, these CRs will be generated. Specifically: +- The Cluster object is created by the user. +- The Component object is a child resource recursively created by the KubeBlocks Cluster Controller when it detects the Cluster object. +- The InstanceSet object is a child resource recursively created by the KubeBlocks Component Controller when it detects the Component object. The InstanceSet Controller then recursively creates the Pod and PVC objects. + +**CRDs for Addons** + +The CRDs for Addons include ClusterDefinition, ComponentDefinition, and ComponentVersion. These CRs are written by Addon developers and bundled within the Addon's Helm chart. + +:::note + +Although users do not need to write CRs for ClusterDefinition and ComponentDefinition, they do need to use these CRs. As seen in the previous examples of creating a Redis Cluster, when users create a Cluster, they either specify the name of the corresponding ComponentDefinition CR in each Component's `componentDef` or specify the name of the corresponding ClusterDefinition CR in `clusterDef` and the desired topology. +::: + + +### KubeBlocks API for User + +#### Cluster +A Cluster object represents an entire database cluster managed by KubeBlocks. A Cluster can include multiple Components. 
Users specify the configuration for each Component here, and the Cluster Controller will generate and reconcile corresponding Component objects. Additionally, the Cluster Controller manages all Service addresses that are exposed at the Cluster level. + +For distributed databases with a shared-nothing architecture, like Redis Cluster, the Cluster supports managing multiple shards, with each shard managed by a separate Component. This architecture also supports dynamic resharding: if you need to scale out and add a new shard, you simply add a new Component; conversely, if you need to scale in and reduce the number of shards, you remove a Component. + +#### Component +Component is a fundamental building block of a Cluster object. For example, a Redis Cluster can include Components like `redis`, `sentinel`, and potentially a proxy like `twemproxy`. + +The Component object is responsible for managing the lifecycle of all replicas within a Component. It supports a wide range of operations including provisioning, stopping, restarting, termination, upgrading, configuration changes, vertical and horizontal scaling, failover, switchover, scheduling configuration, exposing Services, and managing system accounts. + +Component is an internal sub-object derived from the user-submitted Cluster object. It is designed primarily to be used by the KubeBlocks controllers. Users are discouraged from modifying Component objects directly and should use them only for monitoring Component statuses. + +#### InstanceSet +Starting from KubeBlocks v0.9, we have replaced StatefulSet with InstanceSet. + +A database instance, or replica, consists of a Pod and several other auxiliary objects (PVC, Service, ConfigMap, Secret). InstanceSet is a Workload CRD responsible for managing a group of instances. In KubeBlocks, all workloads are ultimately managed through InstanceSet. 
Compared to Kubernetes native Workload CRDs like StatefulSet and Deployment, InstanceSet incorporates more considerations and designs specific to the database domain, such as each replica's role, higher availability requirements, and operational needs like taking specific nodes offline. + +### KubeBlocks API for Addon + +:::note + +Only Addon developers need to understand the ClusterDefinition and ComponentDefinition APIs. As a result, KubeBlocks users can easily bypass these two APIs. +::: + +#### ClusterDefinition +ClusterDefinition is an API used to define all available topologies of a database cluster, offering a variety of topological configurations to meet diverse deployment needs and scenarios. + +Each topology includes a list of components, each linked to a ComponentDefinition, which enhances reusability and reduces redundancy. For example, the ComponentDefinition of widely used components such as etcd and Zookeeper can be defined once and reused across multiple ClusterDefinitions, simplifying the setup of new systems. + +Additionally, ClusterDefinition also specifies the sequence of startup, upgrade, and shutdown for components, ensuring controlled and predictable management of component lifecycles. + +#### ComponentDefinition +ComponentDefinition serves as a reusable blueprint or template for creating Components, encapsulating essential static settings such as Component description, Pod templates, configuration file templates, scripts, parameter lists, injected environment variables and their sources, and event handlers. ComponentDefinition works in conjunction with dynamic settings from the Component to instantiate Components during Cluster creation. + +Key aspects that can be defined in a ComponentDefinition include: + +- PodSpec template: Specifies the PodSpec template used by the Component. +- Configuration templates: Specify the configuration file templates required by the Component. 
+- Scripts: Provide the necessary scripts for Component management and operations. +- Storage volumes: Specify the storage volumes and their configurations for the Component. +- Pod roles: Outline the various roles of Pods within the Component along with their capabilities. +- Exposed Kubernetes Services: Specify the Services that need to be exposed by the Component. +- System accounts: Define the system accounts required for the Component. + +ComponentDefinitions also enable defining reactive behaviors of the Component in response to events, such as member join/leave, Component addition/deletion, role changes, switchover, and more. This allows for automatic event handling, thus encapsulating complex behaviors within the Component. + + +## What is an Addon + +KubeBlocks uses Addons to extend support for various database engines. An Addon represents an extension for a specific database engine, such as the MySQL Addon, PostgreSQL Addon, Redis Addon, MongoDB Addon, and Kafka Addon. There are currently over 30 Addons available in the KubeBlocks repository. + +An Addon includes CRs (Custom Resources) based on the ClusterDefinition, ComponentDefinition, and ComponentVersion CRDs, as well as some ConfigMaps (used as configuration templates or script file templates), script files, CRs defining how to perform backup and restore operations, and Grafana dashboard JSON objects. + +An Addon is packaged and installed in the form of a Helm chart. After the user installs a certain database engine's Addon, they can reference the ClusterDefinition CR and ComponentDefinition CR included in the Addon when creating a Cluster, allowing them to create a Cluster for the corresponding database engine. 
diff --git a/docs/en/release-1_0_1/user_docs/concepts/in-place-update/_category_.yaml b/docs/en/release-1_0_1/user_docs/concepts/in-place-update/_category_.yaml new file mode 100644 index 00000000..4c978024 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/in-place-update/_category_.yaml @@ -0,0 +1,4 @@ +position: 4 +label: In Place Update +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/concepts/in-place-update/ignore-vertical-scale.mdx b/docs/en/release-1_0_1/user_docs/concepts/in-place-update/ignore-vertical-scale.mdx new file mode 100644 index 00000000..a1772b54 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/in-place-update/ignore-vertical-scale.mdx @@ -0,0 +1,13 @@ +--- +title: Enable in-place update +description: Enable in-place update +keywords: [in-place update] +sidebar_position: 2 +sidebar_label: Enable in-place update +--- + +# Enable in-place update + +In Kubernetes versions below 1.27, we have seen support for in-place updates of Resources in many Kubernetes distributions. Different distributions may adopt different approaches to implement this feature. + +To accommodate these Kubernetes distributions, KubeBlocks has introduced the `IgnorePodVerticalScaling` feature switch. When this feature is enabled, KubeBlocks ignores updates to CPU and Memory in Resources during instance updates, ensuring that the Resources configuration of the final rendered Pod remains consistent with the Resources configuration of the currently running Pod. 
diff --git a/docs/en/release-1_0_1/user_docs/concepts/in-place-update/overview.mdx b/docs/en/release-1_0_1/user_docs/concepts/in-place-update/overview.mdx new file mode 100644 index 00000000..517e3588 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/in-place-update/overview.mdx @@ -0,0 +1,54 @@ +--- +title: Introduction +description: Introduction +keywords: [in-place update, overview] +sidebar_position: 1 +sidebar_label: Introduction +--- + +# Overview + +In its earlier versions, KubeBlocks ultimately generated Workloads as StatefulSets. For StatefulSets, any change to the PodTemplate section may result in the update of all Pods, using an update method called `Recreate`, which deletes all current Pods and creates new ones. This is obviously not the best practice for database management, which requires high system availability. +To address this issue, KubeBlocks introduced the instance in-place update feature starting from version 0.9, reducing the impact on system availability during instance updates. + +## Which fields of an instance support in-place updates? + +In principle, KubeBlocks instance in-place updates leverage [the Kubernetes Pod API's in-place update capability](https://kubernetes.io/docs/concepts/workloads/pods/#pod-update-and-replacement). Therefore, the specific supported fields are as follows: + +* `annotations` +* `labels` +* `spec.activeDeadlineSeconds` +* `spec.initContainers[*].image` +* `spec.containers[*].image` +* `spec.tolerations (only supports adding Toleration)` + +Starting from Kubernetes version 1.27, support for in-place updates of CPU and Memory can be further enabled through the `InPlacePodVerticalScaling` feature switch. 
KubeBlocks also supports the `InPlacePodVerticalScaling` feature switch which further supports the following capabilities: + +For Kubernetes versions equal to or greater than 1.27 with InPlacePodVerticalScaling enabled, the following fields' in-place updates are supported: + +* `spec.containers[*].resources.requests["cpu"]` +* `spec.containers[*].resources.requests["memory"]` +* `spec.containers[*].resources.limits["cpu"]` +* `spec.containers[*].resources.limits["memory"]` + +It is important to note that after successful resource resizing, some applications may need to be restarted to recognize the new resource configuration. In such cases, further configuration of container `restartPolicy` is required in ClusterDefinition or ComponentDefinition. + +For PVC, KubeBlocks also leverages the capabilities of the PVC API and only supports volume expansion. If the expansion fails for some reason, it supports reverting to the original capacity. However, once a VolumeClaimTemplate in a StatefulSet is declared, it cannot be modified. Currently, the Kubernetes community is [developing this capability](https://github.com/kubernetes/enhancements/pull/4651), but it won't be available until at least Kubernetes version 1.32. + +## From the upper-level API perspective, which fields utilize in-place updates after being updated? + +KubeBlocks upper-level APIs related to instances include Cluster, ClusterDefinition, ClusterVersion, ComponentDefinition, and ComponentVersion. Within these APIs, several fields will ultimately be directly or indirectly used to render instance objects, potentially triggering in-place updates for instances. + +There are numerous fields across these APIs. See below table for brief descriptions. + +:::note + +Fields marked as deprecated or immutable in the API are not included in the list. + +::: + +| API | Fields | Description | +|:-----|:-------|:-----------| +|Cluster| `annotations`,

`labels`,

`spec.tolerations`,

`spec.componentSpecs[*].serviceVersion`,

`spec.componentSpecs[*].tolerations`,

`spec.componentSpecs[*].resources`,

`spec.componentSpecs[*].volumeClaimTemplates`,

`spec.componentSpecs[*].instances[*].annotations`,

`spec.componentSpecs[*].instances[*].labels`,

`spec.componentSpecs[*].instances[*].image`,

`spec.componentSpecs[*].instances[*].tolerations`,

`spec.componentSpecs[*].instances[*].resources`,

`spec.componentSpecs[*].instances[*].volumeClaimTemplates`,

`spec.shardingSpecs[*].template.serviceVersion`,

`spec.shardingSpecs[*].template.tolerations`,

`spec.shardingSpecs[*].template.resources`,

`spec.shardingSpecs[*].template.volumeClaimTemplates`

| Resources-related fields mean:

`requests["cpu"]`,

`requests["memory"]`,

`limits["cpu"]`,

`limits["memory"]` | +| ComponentVersion | `spec.releases[*].images` | Whether in-place update is triggered depends on whether the corresponding image is changed. | +| KubeBlocks Built-in | `annotations`, `labels` | | diff --git a/docs/en/release-1_0_1/user_docs/concepts/instance-template/_category_.yml b/docs/en/release-1_0_1/user_docs/concepts/instance-template/_category_.yml new file mode 100644 index 00000000..af8763e3 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/instance-template/_category_.yml @@ -0,0 +1,4 @@ +position: 5 +label: Instance Template +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/concepts/instance-template/how-to-use-instance-template.mdx b/docs/en/release-1_0_1/user_docs/concepts/instance-template/how-to-use-instance-template.mdx new file mode 100644 index 00000000..5f2fc301 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/instance-template/how-to-use-instance-template.mdx @@ -0,0 +1,206 @@ +--- +title: Apply instance template +description: Apply instance template +keywords: [apply instance template, instance template] +sidebar_position: 2 +sidebar_label: Apply instance template +--- + +# Apply instance template + +Instance templates can be applied to many scenarios. In this section, we take a RisingWave cluster as an example. + +KubeBlocks supports the management of RisingWave clusters. The RisingWave addon is contributed by the RisingWave official team. For RisingWave to function optimally, it relies on an external storage solution, such as AWS S3 or Alibaba Cloud OSS, to serve as its state backend. When creating a RisingWave cluster, it is necessary to configure credentials and other information for the external storage to ensure normal operation, and this information may vary for each cluster. + +In the official image of RisingWave, this information can be injected via environment variables. 
Therefore, in KubeBlocks 0.9, we can configure corresponding environment variables in the instance template and set the values of these environment variables each time a cluster is created, so as to inject credential information into the container of RisingWave. + +## An example + +In the default template of RisingWave addon, [the environment variables](https://github.com/apecloud/kubeblocks-addons/blob/main/addons/risingwave/templates/cmpd-compute.yaml#L26) are configured as follows: + +```yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ComponentDefinition +metadata: + name: risingwave +# ... +spec: +#... + runtime: + containers: + - name: compute + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + command: + - /risingwave/bin/risingwave + - compute-node + env: + - name: RUST_BACKTRACE + value: "1" + - name: RW_CONFIG_PATH + value: /risingwave/config/risingwave.toml + - name: RW_LISTEN_ADDR + value: 0.0.0.0:5688 + - name: RW_ADVERTISE_ADDR + value: $(KB_POD_FQDN):5688 + - name: RW_META_ADDR + value: load-balance+http://$(metaSvc)-headless:5690 + - name: RW_METRICS_LEVEL + value: "1" + - name: RW_CONNECTOR_RPC_ENDPOINT + value: $(connectorSvc):50051 + - name: RW_PROMETHEUS_LISTENER_ADDR + value: 0.0.0.0:1222 +# ... +``` + +After adding an instance template to the [cluster resources](https://github.com/apecloud/kubeblocks-addons/blob/main/addons-cluster/risingwave/templates/cluster.yaml): + +```yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: Cluster +metadata: + name: {{ include "risingwave-cluster.name" . }} + namespace: {{ .Release.Namespace }} +# ... 
+spec: + componentSpecs: + - componentDef: compute + name: compute + replicas: {{ .Values.risingwave.compute.replicas }} + instances: + - name: instance + replicas: {{ .Values.risingwave.compute.replicas }} + env: + - name: RW_STATE_STORE + value: "hummock+s3://{{ .Values.risingwave.stateStore.s3.bucket }}" + - name: AWS_REGION + value: "{{ .Values.risingwave.stateStore.s3.region }}" + {{- if eq .Values.risingwave.stateStore.s3.authentication.serviceAccountName "" }} + - name: AWS_ACCESS_KEY_ID + value: "{{ .Values.risingwave.stateStore.s3.authentication.accessKey }}" + - name: AWS_SECRET_ACCESS_KEY + value: "{{ .Values.risingwave.stateStore.s3.authentication.secretAccessKey }}" + {{- end }} + - name: RW_DATA_DIRECTORY + value: "{{ .Values.risingwave.stateStore.dataDirectory }}" + {{- if .Values.risingwave.stateStore.s3.endpoint }} + - name: RW_S3_ENDPOINT + value: "{{ .Values.risingwave.stateStore.s3.endpoint }}" + {{- end }} + {{- if .Values.risingwave.metaStore.etcd.authentication.enabled }} + - name: RW_ETCD_USERNAME + value: "{{ .Values.risingwave.metaStore.etcd.authentication.username }}" + - name: RW_ETCD_PASSWORD + value: "{{ .Values.risingwave.metaStore.etcd.authentication.password }}" + {{- end }} + - name: RW_ETCD_ENDPOINTS + value: "{{ .Values.risingwave.metaStore.etcd.endpoints }}" + - name: RW_ETCD_AUTH + value: "{{ .Values.risingwave.metaStore.etcd.authentication.enabled}}" +# ... +``` + +In the example above, we added an instance template through the `instances` field, named `instance`. This template defines several environment variables such as `RW_STATE_STORE` and `AWS_REGION`. These environment variables will be appended by KubeBlocks to the list of environment variables defined in the default template. Consequently, the rendered instance will contain both the default template and all the environment variables defined in this instance template. 
+ +Additionally, the `replicas` field in the instance template is identical to that in the `componentSpec` (both are `{{ .Values.risingwave.compute.replicas }}`), indicating that after overriding the default template, this instance template will be used to render all instances within this component. + +## Detailed information on instance template + +- `Name` field: For each component, multiple instance templates can be defined. The template name is configured with the `Name` field and must remain unique within the same component. +- `Replicas` field: Each template can set the number of instances rendered based on that template via the `Replicas` field, which defaults to 1. The sum of `Replicas` for all instance templates within the same component must be less than or equal to the `Replicas` value of the component. If the number of instances rendered based on the instance templates is less than the total number required by the component, the remaining instances will be rendered using the default template. + +The pattern for the names of instances rendered based on instance templates is `$(cluster name)-$(component name)-$(instance template name)-ordinal`. For example, in the above RisingWave cluster, the cluster name is `risingwave`, the component name is `compute`, the instance template name is `instance`, and the number of `Replicas` is 3. Therefore, the rendered instance names are risingwave-compute-instance-0, risingwave-compute-instance-1, and risingwave-compute-instance-2. + +Instance templates can be used during cluster creation and can be updated during the operations period. Specifically, this includes adding, deleting, or updating instance templates. Updating instance templates may update, delete, or reconstruct instances. It is recommended that you carefully evaluate whether the final changes meet expectations before performing updates. 
+ +### Annotations + +The `Annotations` in the instance template are used to override the `Annotations` field in the default template. If a Key in the `Annotations` of the instance template already exists in the default template, the `value` corresponding to the Key will use the value in the instance template; if the Key does not exist in the default template, the Key and Value will be added to the final `Annotations`. + +***Example:*** + +The `annotations` in the default template are: + +```yaml +annotations: + "foo0": "bar0" + "foo1": "bar" +``` + +And `annotations` in the instance templates are: + +```yaml +annotations: + "foo1": "bar1" + "foo2": "bar2" +``` + +Then, after rendering, the actual annotations are: + +```yaml +annotations: + "foo0": "bar0" + "foo1": "bar1" + "foo2": "bar2" +``` + +:::note + +KubeBlocks adds system `Annotations`, and do not overwrite them. + +::: + +### Labels + +You can also set `Labels` with the instance template. + +Similar to `Annotations`, `Labels` in instance templates follow the same overriding logic applied to existing labels. + +:::note + +KubeBlocks adds system `Labels`, and do not overwrite them. + +::: + +### Image + +The `Image` field in the instance template is used to override the `Image` field of the first container in the default template. + +:::note + +`Image` field should be used with caution: for the StatefulSet like databases, changing the `Image` often involves compatibility issues with data formats. When changing this field, please ensure that the image version in the instance template is fully compatible with that in the default template. + +::: + +With KubeBlocks version 0.9 and above, detailed design for image versions is provided through `ComponentVersion`. It is recommended to manage versions using `ComponentVersion`. + +### NodeName + +`NodeName` in the instance template overrides the same field in the default template. 
+ +### NodeSelector + +`NodeSelector` in the instance template overrides the same field in the default template. + +### Tolerations + +`Tolerations` in the instance template overrides the same field in the default template. + +If the `Toleration` in the instance template is identical to a `Toleration` in the default template (with the same `Key`, `Operator`, `Value`, `Effect`, and `TolerationSeconds`), then that `Toleration` will be ignored. Otherwise, it will be added to the list of `Tolerations` in the default template. + +### RuntimeClassName + +`RuntimeClassName` in the instance template overrides the same field in the default template. + +### Resources + +`Resources` in the instance template overrides the same field in the default template and gets the highest priority. + +### Env + +The environment variables (`Env`) defined in the instance template will override any other environment variables except for the default `Env` set by KubeBlocks. The overriding logic is similar to `Annotations` and `Labels`. If an environment variable name is the same, the value or value source from the instance template will be used; if it's different, it will be added as a new environment variable. \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/concepts/instance-template/introduction.mdx b/docs/en/release-1_0_1/user_docs/concepts/instance-template/introduction.mdx new file mode 100644 index 00000000..a12617a5 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/concepts/instance-template/introduction.mdx @@ -0,0 +1,27 @@ +--- +title: Introduction +description: Introduction +keywords: [instance template] +sidebar_position: 1 +sidebar_label: Introduction +--- + +# Introduction + +## What is an instance template + +An *instance* serves as the fundamental unit in KubeBlocks, comprising a Pod along with several auxiliary objects. To simplify, you can initially think of it as a Pod, and henceforth, we'll consistently refer to it as an "Instance." 
+ +Starting from version 0.9, we're able to establish multiple instance templates for a particular component within a cluster. These instance templates include several fields such as Name, Replicas, Annotations, Labels, Env, Tolerations, NodeSelector, etc. These fields will ultimately override the corresponding ones in the default template (originating from ClusterDefinition and ComponentDefinition) to generate the final template for rendering the instance. + +## Why do we introduce the instance template + +In KubeBlocks, a *Cluster* is composed of several *Components*, where each *Component* ultimately oversees multiple *Pods* and auxiliary objects. + +Prior to version 0.9, these pods were rendered from a shared PodTemplate, as defined in either ClusterDefinition or ComponentDefinition. However, this design can’t meet the following demands: + + - For Clusters rendered from the same addon, setting separate scheduling configurations such as *NodeName*, *NodeSelector*, or *Tolerations*. + - For Components rendered from the same addon, adding custom *Annotations*, *Labels*, or ENV to the Pods they manage. + - For Pods managed by the same Component, configuring different *CPU*, *Memory*, and other *Resource Requests* and *Limits*. + +With various similar requirements emerging, the Cluster API introduced the Instance Template feature from version 0.9 onwards to cater to these needs. 
diff --git a/docs/en/release-1_0_1/user_docs/overview/_category_.yml b/docs/en/release-1_0_1/user_docs/overview/_category_.yml new file mode 100644 index 00000000..47311d9d --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/overview/_category_.yml @@ -0,0 +1,4 @@ +position: 1 +label: Getting Started +collapsible: true +collapsed: true diff --git a/docs/en/release-1_0_1/user_docs/overview/install-kubeblocks.mdx b/docs/en/release-1_0_1/user_docs/overview/install-kubeblocks.mdx new file mode 100644 index 00000000..33d3dbae --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/overview/install-kubeblocks.mdx @@ -0,0 +1,540 @@ +--- +title: Installation +description: Install KubeBlocks on the existing Kubernetes clusters with Helm +keywords: [taints, affinity, tolerance, install, kbcli, KubeBlocks, helm, kubernetes, operator] +sidebar_position: 4 +sidebar_label: Installation +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import { VersionProvider, Version } from '@/components/VersionContext'; + + + +# KubeBlocks + +This guide covers KubeBlocks deployment on existing Kubernetes clusters. 
Choose your preferred installation method: + +- **Helm** (recommended for production) +- **kbcli** (simplified CLI experience) + + +## Prerequisites + +### Resource Requirements +| Component | Database | Recommendation | +|--------------|------------|---------------| +| **Control Plane** | - | 1 node (4 cores, 4GB RAM, 50GB storage) | +| **Data Plane** | MySQL | 2 nodes (2 cores, 4GB RAM, 50GB storage) | +| | PostgreSQL | 2 nodes (2 cores, 4GB RAM, 50GB storage) | +| | Redis | 2 nodes (2 cores, 4GB RAM, 50GB storage) | +| | MongoDB | 3 nodes (2 cores, 4GB RAM, 50GB storage) | + +- **Control Plane**: Nodes running KubeBlocks components +- **Data Plane**: Nodes hosting database instances + + +### System Requirements + +Before installation, verify your environment meets these requirements: + +- Kubernetes cluster (v1.21+ recommended) - [create test cluster](../references/prepare-a-local-k8s-cluster) if needed +- `kubectl` v1.21+ installed and configured with cluster access +- Helm installed ([installation guide](https://helm.sh/docs/intro/install/)) +- Snapshot Controller installed ([installation guide](../references/install-snapshot-controller)) + +## Install KubeBlocks + + + + + +```bash +# Step 1: Install CRDs +kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/{{VERSION}}/kubeblocks_crds.yaml + +# Step 2: Configure Helm Repository +helm repo add kubeblocks https://apecloud.github.io/helm-charts +helm repo update + +# Step 3: Deploy KubeBlocks +helm install kubeblocks kubeblocks/kubeblocks --namespace kb-system --create-namespace --version={{VERSION}} +``` + + +:::note + +If you are using K8s \<\= 1.23, you may encounter the following error when installing CRDs: + +```bash +unknown field "x-kubernetes-validations".... 
if you choose to ignore these errors, turn validation off with --validate\=false +``` + +You can fix this by running the following command: +```bash +kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/{{VERSION}}/kubeblocks_crds.yaml --validate\=false +``` +::: + + +**Need a different version?** + +For other versions, you can find available releases on [KubeBlocks Releases](https://github.com/apecloud/kubeblocks/releases/) or query using: + +```bash +# Get latest stable release +curl -s https://api.github.com/repos/apecloud/kubeblocks/releases/latest | jq -r '.tag_name' + +# Get all releases (including pre-releases) +curl -s https://api.github.com/repos/apecloud/kubeblocks/tags | jq -r '.[0].name' +``` + + + + + +**Before You Begin**: +- Install [KubeBlocks CLI](../../user_docs/references/install-kbcli) +- Ensure kubectl is configured with cluster access + +```bash +kbcli kubeblocks install --version={{VERSION}} --create-namespace +``` + +**Need a different version?** + +List available versions or find other releases: + +```bash +# List stable releases +kbcli kubeblocks list-versions + +# List all releases (including pre-releases) +kbcli kb list-versions --devel --limit=100 +``` + +Or browse all releases on [KubeBlocks Releases](https://github.com/apecloud/kubeblocks/releases/). + +:::note + +**Version Compatibility** + +KubeBlocks requires matching major versions between `kbcli` and the installed release: +- Compatible: kbcli v1.0.0 with KubeBlocks v1.0.0 +- Incompatible: kbcli v0.9.0 with KubeBlocks v1.0.0 + +Mismatched major versions may cause unexpected behavior or errors. + +::: + +By default, KubeBlocks installs in the `kb-system` namespace. 
To specify a different namespace: + +```bash +kbcli kubeblocks install --version={{VERSION}} --create-namespace --namespace my-namespace +``` + +💡 *Remember to replace `my-namespace` with your desired namespace name.* + + + + + +## Verify Installation + +Run the following command to check whether KubeBlocks is installed successfully. + + + + + +```bash +kubectl -n kb-system get pods +``` + +
+ Expected Output: + +If the KubeBlocks Workloads are all ready, KubeBlocks has been installed successfully. + +```bash +NAME READY STATUS RESTARTS AGE +kubeblocks-7cf7745685-ddlwk 1/1 Running 0 4m39s +kubeblocks-dataprotection-95fbc79cc-b544l 1/1 Running 0 4m39s +``` +
+ +
+ + + +```bash +kbcli kubeblocks status +``` + +
+ Expected Output: + +If the KubeBlocks Workloads are all ready, KubeBlocks has been installed successfully. + +```bash +KubeBlocks is deployed in namespace: kb-system,version: {{VERSION}} + +Kubernetes Cluster: +VERSION PROVIDER REGION AVAILABLE ZONES +v1.29.2 Kind + +KubeBlocks Workloads: +NAMESPACE KIND NAME READY PODS CPU(CORES) MEMORY(BYTES) CREATED-AT +kb-system Deployment kubeblocks 1/1 N/A N/A May 26,2025 13:53 UTC+0800 +kb-system Deployment kubeblocks-dataprotection 1/1 N/A N/A May 26,2025 13:53 UTC+0800 + +KubeBlocks Addons: +NAME STATUS TYPE PROVIDER +apecloud-mysql Enabled Helm N/A +etcd Enabled Helm N/A +kafka Enabled Helm N/A +``` +
+ +
+ +
+ +## Advanced Configuration + +Here list some commonly used configurations for KubeBlocks. For more information about KubeBlocks options, please refer to [KubeBlocks Options](../references/kubeblocks_options). + +### Custom Image Registry + +Specify image repository by specifying the following parameters. + + + + + +```bash +helm install kubeblocks kubeblocks/kubeblocks --namespace kb-system --create-namespace \ +--version {{VERSION}} \ +--set image.registry=docker.io \ +--set dataProtection.image.registry=docker.io \ +--set addonChartsImage.registry=docker.io +``` + + + + + +```bash +kbcli kubeblocks upgrade --version {{VERSION}} \ +--set image.registry=docker.io \ +--set dataProtection.image.registry=docker.io \ +--set addonChartsImage.registry=docker.io +``` + + + + + +Here is an introduction to the flags in the above command. + +- `--set image.registry` specifies the KubeBlocks image registry. +- `--set dataProtection.image.registry` specifies the KubeBlocks-DataProtection image registry. +- `--set addonChartsImage.registry` specifies Addon Charts image registry. 
+ +If you cannot access `docker.io` please use following registry and namespace: +- registry: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com +- namespace: apecloud + +### Specify tolerations + +If you want to install KubeBlocks with custom tolerations, you can use the following command: + + + + + +```bash +helm install kubeblocks kubeblocks/kubeblocks --namespace kb-system --create-namespace \ +--version {{VERSION}} \ +--set-json 'tolerations=[ { "key": "control-plane-taint", "operator": "Equal", "effect": "NoSchedule", "value": "true" } ]' \ +--set-json 'dataPlane.tolerations=[{ "key": "data-plane-taint", "operator": "Equal", "effect": "NoSchedule", "value": "true"}]' +``` + + + + + +```bash +kbcli kubeblocks install --version {{VERSION}} --create-namespace \ +--set image.registry=docker.io \ +--set dataProtection.image.registry=docker.io \ +--set addonChartsImage.registry=docker.io +``` + + + + + +### Skip Addon Auto Installation + +```bash +helm install kubeblocks kubeblocks/kubeblocks --namespace kb-system --create-namespace \ +--version {{VERSION}} \ +--set autoInstalledAddons="{}" +``` + +### Enable In-place Vertical Scaling + +To enable in-place vertical scaling for KubeBlocks, set the feature gate parameter during installation or upgrade: + +```bash +featureGates.inPlacePodVerticalScaling.enabled=true +``` + + + + +1. Installation +```bash +helm install kubeblocks kubeblocks/kubeblocks \ + --namespace kb-system \ + --create-namespace \ + --version {{VERSION}} \ + --set featureGates.inPlacePodVerticalScaling.enabled=true +``` + +1. Upgrade +```bash +helm upgrade kubeblocks kubeblocks/kubeblocks \ + --namespace kb-system \ + --version {{VERSION}} \ + --set featureGates.inPlacePodVerticalScaling.enabled=true +``` + + + + + +1. Installation +```bash +kbcli kubeblocks install \ + --version={{VERSION}} \ + --create-namespace \ + --set featureGates.inPlacePodVerticalScaling.enabled=true +``` + +1. 
Upgrade +```bash +kbcli kubeblocks upgrade \ + --version={{VERSION}} \ + --set featureGates.inPlacePodVerticalScaling.enabled=true +``` + + + + + +**Verification** + +After installation or upgrade, verify the feature gate is enabled: + +```bash +kubectl -n kb-system get deployments.apps kubeblocks -oyaml | \ + yq '.spec' | \ + grep IN_PLACE_POD_VERTICAL_SCALING -A 1 +``` + +The output should show: +```text +- name: IN_PLACE_POD_VERTICAL_SCALING + value: "true" +``` + +## Uninstall KubeBlocks + +:::note + +Please delete all clusters and backups before uninstalling KubeBlocks and kbcli. +::: + +```bash +# get cluster and backups +kubectl get cluster -A +kubectl get backup -A + +# delete clusters and backups by by namespace +kubectl delete cluster -n +kubectl delete backup -n +``` + + + + + +1. List all addons +```bash +# list all addons +helm list -n kb-system | grep kb-addon +``` + +2. Uninstall all Addons. +```bash +helm list -n kb-system | grep kb-addon | awk '{print $1}' | xargs -I {} helm -n kb-system uninstall {} +``` + +While uninstalling, you will get messages like +``` +Release "kb-addon-etcd" uninstalled +These resources were kept due to the resource policy: +[ConfigMap] kafka27-configuration-tpl-1.0.0 +[ComponentDefinition] kafka-combine-1.0.0 +[ComponentDefinition] kafka-controller-1.0.0 +[ComponentDefinition] kafka-exporter-1.0.0 +[ComponentDefinition] kafka27-broker-1.0.0 +[ComponentDefinition] kafka-broker-1.0.0 +``` + +Some resources are kept due to resource policy, then check and remove them all + +3. Check remaining resources, such as ComponentDefinition, ConfigMaps for Configuration, and remove them all. 
+```bash +kubectl get componentdefinitions.apps.kubeblocks.io +kubectl get parametersdefinitions.parameters.kubeblocks.io +kubectl get configmap -n kb-system | grep configuration +kubectl get configmap -n kb-system | grep template +``` + +For example +```bash +kubectl delete componentdefinitions.apps.kubeblocks.io --all +kubectl delete parametersdefinitions.parameters.kubeblocks.io --all +kubectl get configmap -n kb-system | grep configuration | awk '{print $1}' | xargs -I {} kubectl delete -n kb-system cm {} +kubectl get configmap -n kb-system | grep template| awk '{print $1}' | xargs -I {} kubectl delete -n kb-system cm {} +``` + +4. Delete Addon CRs + +```bash +kubectl delete addon.extensions.kubeblocks.io --all +``` + +5. Verify all KubeBlocks resources are deleted + +```bash +kubectl get crd | grep kubeblocks.io | awk '{print $1}' | while read crd; do + echo "Processing CRD: $crd" + kubectl get "$crd" -o json | jq '.items[] | select(.metadata.finalizers != null) | .metadata.name' -r | while read resource; do + echo "Custom Resource left: $resource in CRD: $crd" + done +done +``` + +If the output shows any custom resource left, please remove them all. + +6. Uninstall KubeBlocks + +```bash +helm uninstall kubeblocks --namespace kb-system +``` + +Helm does not delete CRD objects. You can delete the ones KubeBlocks created with the following commands: + +```bash +kubectl get crd -o name | grep kubeblocks.io | xargs kubectl delete +``` + +7. Verify all KubeBlocks resources are deleted + +```bash +kubectl get crd | grep kubeblocks.io | awk '{print $1}' | while read crd; do + echo "Processing CRD: $crd" + kubectl get "$crd" -o json | jq '.items[] | select(.metadata.finalizers != null) | .metadata.name' -r | while read resource; do + echo "Custom Resource left: $resource in CRD: $crd" + done +done +``` +The output should be empty. + + + + + +1. Check addon list +```bash +kbcli addon list | grep Enabled +``` + +2. 
set `keepResource=false` for all addons +```bash +# update addons values, to remove annotation 'helm.sh/resource-policy: keep' from ComponentDefinition/ConfigMaps +kbcli addon enable --set extra.keepResource=false +``` + +For example +```bash +kbcli addon enable apecloud-mysql --set extra.keepResource=false +kbcli addon enable etcd --set extra.keepResource=false +kbcli addon enable kafka --set extra.keepResource=false +kbcli addon enable mongodb --set extra.keepResource=false +kbcli addon enable mysql --set extra.keepResource=false +kbcli addon enable postgresql --set extra.keepResource=false +kbcli addon enable redis --set extra.keepResource=false +``` + +3. disable all Addons + +```bash +kbcli addon disable +``` + +For example, +```bash +kbcli addon disable apecloud-mysql +kbcli addon disable etcd +kbcli addon disable kafka +kbcli addon disable mongodb +kbcli addon disable mysql +kbcli addon disable postgresql +kbcli addon disable redis +``` + +4. Verify all KubeBlocks resources are deleted + +```bash +kubectl get crd | grep kubeblocks.io | awk '{print $1}' | while read crd; do + echo "Processing CRD: $crd" + kubectl get "$crd" -o json | jq '.items[] | select(.metadata.finalizers != null) | .metadata.name' -r | while read resource; do + echo "Custom Resource left: $resource in CRD: $crd" + done +done +``` + +If the output shows some custom resource left, please remove them all. + +5. uninstall KubeBlocks +```bash +kbcli kubeblocks uninstall +``` + +6. Verify all KubeBlocks resources are deleted + +```bash +kubectl get crd | grep kubeblocks.io | awk '{print $1}' | while read crd; do + echo "Processing CRD: $crd" + kubectl get "$crd" -o json | jq '.items[] | select(.metadata.finalizers != null) | .metadata.name' -r | while read resource; do + echo "Custom Resource left: $resource in CRD: $crd" + done +done +``` +check there is no ConfigMap leftover: + +```bash +kubectl get configmap -n kb-system +``` + + + + + +
diff --git a/docs/en/release-1_0_1/user_docs/overview/introduction.mdx b/docs/en/release-1_0_1/user_docs/overview/introduction.mdx new file mode 100644 index 00000000..a86debbd --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/overview/introduction.mdx @@ -0,0 +1,242 @@ +--- +title: Introduction +description: introduction to KubeBlocks +keywords: [kubernetes, operator, database, kubeblocks, overview, introduction] +sidebar_position: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Introduction + +## What is KubeBlocks + +KubeBlocks is an open-source Kubernetes operator for databases (more specifically, for stateful applications, including databases and middleware like message queues), enabling users to run and manage multiple types of databases on Kubernetes. As far as we know, most database operators typically manage only one specific type of database. For example: +- CloudNativePG, Zalando, CrunchyData, StackGres operator can manage PostgreSQL +- Strimzi manages Kafka +- Oracle and Percona MySQL operator manage MySQL + +In contrast, KubeBlocks is designed to be a **general-purpose database operator**. This means that when designing the KubeBlocks API, we didn’t tie it to any specific database. Instead, we abstracted the common features of various databases, resulting in a universal, engine-agnostic API. Consequently, the operator implementation developed around this abstract API is also agnostic to the specific database engine. + +![Design of KubeBlocks, a general purpose database operator](/img/docs/en/kubeblocks_general_purpose_arch.png) + +In above diagram, Cluster, Component, and InstanceSet are all CRDs provided by KubeBlocks. If you'd like to learn more about them, please refer to [concepts](../concepts/concept). + +KubeBlocks offers an Addon API to support the integration of various databases. 
For instance, we currently have the following KubeBlocks Addons for mainstream open-source database engines: +- MySQL +- PostgreSQL +- Redis +- MongoDB +- Kafka +- RabbitMQ +- Minio +- Elasticsearch +- StarRocks +- Qdrant +- Milvus +- ZooKeeper +- etcd +- ... + +For a detailed list of Addons and their features, please refer to [supported addons](supported-addons.md). + +The unified API makes KubeBlocks an excellent choice if you need to run multiple types of databases on Kubernetes. It can significantly reduce the learning curve associated with mastering multiple operators. + +## How unified APIs reduce your learning curve + +Here is an example of how to use KubeBlocks' Cluster API to write a YAML file and create a MySQL Cluster with two replicas. + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: test-mysql + namespace: demo +spec: + terminationPolicy: Delete + componentSpecs: + - name: mysql + componentDef: "mysql-8.0" + serviceVersion: 8.0.35 + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +Then, here comes the magic: with just a few modifications to some fields, you can create a PostgreSQL Cluster with two replicas! The same applies to MongoDB and Redis (the Redis example is slightly longer because it creates two components: redis-server and sentinel), and this approach works with a long list of engines. 
+ + + + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: test-pg + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + + + + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: test-mongo + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + + + + +```yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: Cluster +metadata: + name: test-redis + namespace: demo +spec: + terminationPolicy: Delete + componentSpecs: + - name: redis + componentDef: redis-7 + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + - name: redis-sentinel + componentDef: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi +``` + + + + +This means that managing multiple databases on Kubernetes becomes simple, efficient, and standardized, saving you a lot of time that would otherwise be spent searching through manuals and API references. 
+
+## Key features
+
+- Provisioning and destroying database clusters.
+- Start, stop, and restart database clusters.
+- Supports selecting a deployment topology provided by the engine's Addon when creating a cluster, such as Redis with options for Sentinel-based read-write separation or Redis Cluster; MySQL with optional Proxy for read-write separation and HA solutions, e.g. the built-in Raft consensus plugin, external etcd as the coordinator, or Orchestrator.
+- Supports having different configurations for multiple replicas within a single database cluster. This is common, for example, in a MySQL cluster where the primary instance uses 8 CPUs while the read replicas use 4 CPUs. Kubernetes' StatefulSet does not support this capability.
+- Flexible Network Management:
+  - Expose database access endpoints as Services (ClusterIP, LoadBalancer, NodePort) dynamically.
+  - Support for HostNetwork.
+  - Some databases support access through a so-called Smart Client, which redirects requests to other nodes or handles read-write separation based on the node addresses returned by the server. Databases with the Smart Client access mode include Redis, MongoDB, and Kafka. Additionally, some databases, such as etcd, have clients that implement automatic failover between replicas. For these databases, KubeBlocks supports assigning a service address to each Pod (Pod Service).
+- Supports a Wide Range of Day-2 Operations:
+  - Horizontal scaling (increasing and decreasing the number of replicas)
+  - Vertical scaling (adjusting CPU and memory resources for each replica)
+  - PVC Volume capacity expansion
+  - Backup and restore capabilities
+  - Configuration changes (and hot reload, if possible)
+  - Parameter modification
+  - Switchover
+  - Rolling upgrades
+  - Decommission a specific replica
+  - Minor version upgrades
+- In addition to the declarative API, KubeBlocks also offers an OpsRequest API for executing one-time operational tasks on database clusters. The OpsRequest API supports additional features such as queuing, concurrency control, progress tracking, and operation rollback.
+- Observability: Supports integration with Prometheus and Grafana.
+- Includes a powerful and intuitive command-line tool `kbcli`, which makes operating KubeBlocks CRs on Kubernetes more straightforward and reduces keystrokes. For those well-versed in Kubernetes, kbcli can be used alongside kubectl to provide a more streamlined way of performing operations.
+
+## Deployment Architecture
+Below is a typical diagram illustrating the deployment of KubeBlocks in a cloud environment.
+
+Kubernetes should be deployed in an environment where nodes can communicate with each other over the network (e.g., within a VPC). The KubeBlocks Operator is deployed in a dedicated namespace (kb-system), while database instances are deployed in user-specified namespaces.
+
+In a production environment, we recommend deploying the KubeBlocks Operator (along with Prometheus and Grafana, if installed) on different nodes from the databases. By default, multiple replicas of a database cluster are scheduled to run on different nodes using anti-affinity rules to ensure high availability. Users can also configure AZ-level anti-affinity to distribute database replicas across different availability zones (AZs), thereby enhancing disaster recovery capabilities.
+
+Each database replica runs within its own Pod. In addition to the container running the database process, the Pod includes several sidecar containers: one called `lorry` (which will be renamed to kbagent starting from KubeBlocks v1.0) that executes Action commands from the KubeBlocks controller, and another called `config-manager` that manages database configuration files and supports hot updates. Optionally, the engine's Addon may have an exporter container to collect metrics for Prometheus monitoring. 
+
+![KubeBlocks Architecture](/img/docs/en/kubeblocks-architecture-ha.png)
diff --git a/docs/en/release-1_0_1/user_docs/overview/supported-addons.mdx b/docs/en/release-1_0_1/user_docs/overview/supported-addons.mdx
new file mode 100644
index 00000000..73be9adc
--- /dev/null
+++ b/docs/en/release-1_0_1/user_docs/overview/supported-addons.mdx
@@ -0,0 +1,216 @@
+---
+title: Supported Addons
+description: Addons supported by KubeBlocks
+keywords: [addons, enable, KubeBlocks, prometheus, s3, alertmanager, kubernetes, operator, database, mysql, mariadb, postgresql, redis cluster, oracle, mssql, sqlserver, qdrant, milvus, minio, mongodb, etcd, zookeeper, starrocks, elasticsearch, clickhouse, kafka, tidb, influxdb]
+sidebar_position: 3
+sidebar_label: Supported addons
+---
+
+# Supported Addons
+
+KubeBlocks uses Addons to extend support for various database engines. There are currently over 30 Addons available in the KubeBlocks repository, which can be further categorized into the following sections.
+
+For installing and enabling Addons, refer to [Addon installation tutorial](./../references/install-addons).
+
+## Relational Databases
+
+MySQL and PostgreSQL are the two most popular open-source relational databases in the world, and they have branches/variants.
+
+### MySQL and its variants
+
+**Addon List**
+
+| Addons | Description |
+|:----------------|:---------------|
+| mysql | This addon uses the community edition MySQL image officially released by Oracle. |
+| mariadb | MariaDB is a high performance open source relational database management system that is widely used for web and application servers. |
+
+**Supported Features**
+
+:::note
+
+The versions listed below may not be up-to-date, and some supported versions might be missing. For the latest addon versions, please refer to the [KubeBlocks addon GitHub repo](https://github.com/apecloud/kubeblocks-addons). 
+ +::: + +| Addon (v0.9.0) | Supported Versions | Vscale | Hscale | Volumeexpand | Stop/Start | Restart | Expose | Backup/Restore | Logs | Config | Upgrade (DB engine version) | Account | Failover | Switchover | +|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:| +| mysql | • 5.7.44
• 8.0.33
• 8.4.2 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | +| mariadb | 10.6.15 | ✔️ | N/A | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | + +### Postgresql and its variants + +**Addon List** + +| Addons | Description | +|:----------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| postgresql | This addon provides PostgreSQL services using the Spilo image with Patroni for high availability (HA). | +| vanilla-postgresql | This Addon is based on the vanilla PostgreSQL, providing high availability capabilities for native PostgreSQL and its variants. | +| orioledb | OrioleDB is a new storage engine for PostgreSQL, bringing a modern approach to database capacity, capabilities and performance to the world's most-loved database platform. | + + +**Supported Features** + +| Addon (v0.9.0) | Supported Versions | Vscale | Hscale | Volumeexpand | Stop/Start | Restart | Expose | Backup/Restore | Logs | Config | Upgrade (DB engine version) | Account | Failover | Switchover | +|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:| +| postgresql | • 12.14.0
• 12.14.1
• 12.15.0
• 14.7.2
• 14.8.0
• 15.7.0 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | +| vanilla-postgresql | • 12.15.0
• 14.7.0
• 15.6.1138
• 15.7.0 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | +| orioledb | 14.7.2-beta1 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | + +## NoSQL + +**Addon List** + +| Addons | Description | +|:----------------|:-----------------------------| +| mongodb | MongoDB is a document-oriented NoSQL database used for high volume data storage. | +| redis | Redis is a fast, open source, in-memory, key-value data store. | +| etcd | etcd is a strongly consistent, distributed key-value store that provides a reliable way to store data that needs to be accessed by a distributed system or cluster of machines. | +| zookeeper | Apache ZooKeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. | + + +**Supported Features** + + +| Addon (v0.9.0) | Supported Versions | Vscale | Hscale | Volumeexpand | Stop/Start | Restart | Expose | Backup/Restore | Logs | Config | Upgrade (DB engine version) | Account | Failover | Switchover | +|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:| +| mongodb | • 4.0.28
• 4.2.24
• 4.4.29
• 5.0.28
• 6.0.16
• 7.0.12 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | ✔️ | ✔️ | +| redis | • 7.0.6
• 7.2.4
| ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | ✔️ | ✔️ | N/A | +| etcd |

3.5.15

3.5.6

| ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| zookeeper |

3.4.14

3.6.4

3.7.2

3.8.4

3.9.2

| ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | ✔️ | ✔️ | N/A | N/A | N/A | N/A | + + +## OLAP Systems + +**Addon List** + +| Addons | Description | +|:----------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| elasticsearch | Elasticsearch is a distributed, RESTful search engine optimized for speed and relevance on production-scale workloads. | +| starrocks-ce | StarRocks is a next-gen, high-performance analytical data warehouse that enables real-time, multi-dimensional, and highly concurrent data analysis. | +| clickhouse | ClickHouse is a column-oriented database that enables its users to generate powerful analytics, using SQL queries, in real-time. | +| opensearch | Open source distributed and RESTful search engine. | + +**Supported Features** + +| Addon (v0.9.0) | Supported Versions | Vscale | Hscale | Volumeexpand | Stop/Start | Restart | Expose | Backup/Restore | Logs | Config | Upgrade (DB engine version) | Account | Failover | Switchover | +|:-------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:| +| elasticsearch | • 7.10.1
• 7.7.1
• 7.8.1
• 8.1.3
• 8.8.2 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| starrocks-ce | • 3.2.2
• 3.3.0
| ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| clickhouse | 22.9.4 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| opensearch | 2.7.0 | ✔️ | N/A | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | + +## Distributed SQL Databases + +**Addon List** + +| Addons | Description | +|:----------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| tidb | TiDB is a MySQL-compatible distributed database, with the SQL layer developed in Go, the storage layer based on RocksDB, and the transaction model using Percolator. Provided by PingCap. | +| oceanbase-ce | OceanBase Community Edition is a MySQL-compatible distributed database developed in C++. | +| polardb-x | PolarDB-X Community Edition is a MySQL-compatible distributed database that supports horizontal scaling based on MySQL. Provided by Alibaba Cloud, it is open-source. | + + +**Supported Features** + + +| Addon (v0.9.0) | Supported Versions | Vscale | Hscale | Volumeexpand | Stop/Start | Restart | Expose | Backup/Restore | Logs | Config | Upgrade (DB engine version) | Account | Failover | Switchover | +|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:| +| tidb | • 6.5.10
• 7.1.5
• 7.5.2
| ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| oceanbase | 4.3.0 | N/A | ✔️ | ✔️ | N/A | N/A | | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| polardb-x | 2.3 | ✔️ | ✔️ | N/A | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | + +## Message Queues + +**Addon List** + +| Addons | Description | +|:----------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| kafka | Apache Kafka is an open-source distributed event streaming platform used by thousands of companies for high-performance data pipelines, streaming analytics, data integration, and mission-critical applications.
| +| rabbitmq | RabbitMQ is a reliable and mature messaging and streaming broker. | +| pulsar | Apache Pulsar is an open-source, distributed messaging and streaming platform. | + + + +**Supported Features** + + +| Addon (v0.9.0) | Supported Versions | Vscale | Hscale | Volumeexpand | Stop/Start | Restart | Expose | Backup/Restore | Logs | Config | Upgrade (DB engine version) | Account | Failover | Switchover | +|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:| +| kafka | • 3.3.2 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | ✔️ | N/A | N/A | N/A | N/A | +| rabbitmq | • 3.13.2
• 3.12.14
• 3.11.28
• 3.10.25
• 3.9.29
• 3.8.14 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | Managed by the RabbitMQ Management system. | ✔️ | ✔️ | +| pulsar | • 2.11.2
• 3.0.2
| ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | ✔️ | N/A | N/A | N/A | N/A | + +## Vector Databases + +**Addon List** + +| Addons | Description | +|:----------------|:-------------------------------------------------------------------------------------------| +| qdrant | Qdrant is a vector database & vector similarity search engine. | +| weaviate | Weaviate is an open-source vector database. | +| milvus | Milvus is a flexible, reliable, & blazing-fast cloud-native, open-source vector database. | + + +**Supported Features** + +| Addon (v0.9.0) | Supported Versions | Vscale | Hscale | Volumeexpand | Stop/Start | Restart | Expose | Backup/Restore | Logs | Config | Upgrade (DB engine version) | Account | Failover | Switchover | +|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:| +| qdrant | • 1.10.0
• 1.5.0
• 1.7.3
• 1.8.1
• 1.8.4 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | +| weaviate | 1.23.1 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | ✔️ | N/A | N/A | N/A | N/A | +| milvus | 2.3.2 | ✔️ | N/A | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | + +## Time Series Databases + +**Addon List** + +| Addons | Description | +|:----------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| influxdb | InfluxDB enables real-time analytics by serving as a purpose-built database that optimizes processing and scaling for large time series data workloads. | +| victoria-metrics | VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database. | +| greptimedb | GreptimeDB is an open-source time-series database with a special focus on scalability, analytical capabilities and efficiency. | +| tdengine | TDengine™ is an industrial data platform purpose-built for the Industrial IoT, combining a time series database with essential features like stream processing, data subscription, and caching. 
| + + +**Supported Features** + + +| Addon (v0.9.0) | Supported Versions | Vscale | Hscale | Volumeexpand | Stop/Start | Restart | Expose | Backup/Restore | Logs | Config | Upgrade (DB engine version) | Account | Failover | Switchover | +|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:| +| influxdb | 2.7.4 | ✔️ | N/A | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| victoria-metrics | 1.0.0 | | | | | | | | | | | | | | +| greptimedb | 0.3.2 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | +| tdengine | 3.0.5 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | + +## Graph Databases + +**Addon List** + +| Addons | Description | +|:----------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| nebula | NebulaGraph is an open source graph database that can store and process graphs with trillions of edges and vertices. 
| + + +**Supported Features** + + +| Addon (v0.9.0) | Supported Versions | Vscale | Hscale | Volumeexpand | Stop/Start | Restart | Expose | Backup/Restore | Logs | Config | Upgrade (DB engine version) | Account | Failover | Switchover | +|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:| +| nebula | 3.5.0 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | + +## Storage System + +**Addon List** + +| Addons | Description | +|:----------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| minio | MinIO is an object storage solution that provides an Amazon Web Services S3-compatible API and supports all core S3 features. 
| + + +**Supported Features** + + +| Addon (v0.9.0) | Supported Versions | Vscale | Hscale | Volumeexpand | Stop/Start | Restart | Expose | Backup/Restore | Logs | Config | Upgrade (DB engine version) | Account | Failover | Switchover | +|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:| +| minio | RELEASE.2024-06-29T01-20-47Z | ✔️ | ✔️ | N/A | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | + diff --git a/docs/en/release-1_0_1/user_docs/references/_category_.yml b/docs/en/release-1_0_1/user_docs/references/_category_.yml new file mode 100644 index 00000000..507b04db --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/_category_.yml @@ -0,0 +1,4 @@ +position: 81 +label: References +collapsible: true +collapsed: true diff --git a/docs/en/release-1_0_1/user_docs/references/api-reference/_category_.yml b/docs/en/release-1_0_1/user_docs/references/api-reference/_category_.yml new file mode 100644 index 00000000..1289d307 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/api-reference/_category_.yml @@ -0,0 +1,4 @@ +position: 1 +label: API Reference +collapsible: true +collapsed: true diff --git a/docs/en/release-1_0_1/user_docs/references/api-reference/add-on.mdx b/docs/en/release-1_0_1/user_docs/references/api-reference/add-on.mdx new file mode 100644 index 00000000..eeb6961c --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/api-reference/add-on.mdx @@ -0,0 +1,2545 @@ +--- +title: Add-On API Reference +description: Add-On API Reference +keywords: [add-on, api] +sidebar_position: 5 +sidebar_label: Add-On +--- +
+ +

+Packages: +

+ +

extensions.kubeblocks.io/v1alpha1

+
+
+Resource Types: + +

+Addon + +

+
+ +

+Addon is the Schema for the add-ons API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`extensions.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Addon` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +AddonSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Specifies the description of the add-on. +

+ +
+ +`type`
+ + +AddonType + + + +
+ + +

+Defines the type of the add-on. The only valid value is ‘helm’. +

+ +
+ +`version`
+ +string + + +
+ +(Optional) + +

+Indicates the version of the add-on. +

+ +
+ +`provider`
+ +string + + +
+ +(Optional) + +

+Specifies the provider of the add-on. +

+ +
+ +`helm`
+ + +HelmTypeInstallSpec + + + +
+ +(Optional) + +

+Represents the Helm installation specifications. This is only processed +when the type is set to ‘helm’. +

+ +
+ +`defaultInstallValues`
+ + +[]AddonDefaultInstallSpecItem + + + +
+ + +

+Specifies the default installation parameters. +

+ +
+ +`install`
+ + +AddonInstallSpec + + + +
+ +(Optional) + +

+Defines the installation parameters. +

+ +
+ +`installable`
+ + +InstallableSpec + + + +
+ +(Optional) + +

+Represents the installable specifications of the add-on. This includes +the selector and auto-install settings. +

+ +
+ +`cliPlugins`
+ + +[]CliPlugin + + + +
+ +(Optional) + +

+Specifies the CLI plugin installation specifications. +

+ +
+ +
+ +`status`
+ + +AddonStatus + + + +
+ + +
+

+AddonDefaultInstallSpecItem + +

+ +

+ +(Appears on:AddonSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`AddonInstallSpec`
+ + +AddonInstallSpec + + + +
+ + +

+ +(Members of `AddonInstallSpec` are embedded into this type.) + +

+ +
+ +`selectors`
+ + +[]SelectorRequirement + + + +
+ +(Optional) + +

+Indicates the default selectors for add-on installations. If multiple selectors are provided, +all selectors must evaluate to true. +

+ +
+

+AddonInstallExtraItem + +

+ +

+ +(Appears on:AddonInstallSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`AddonInstallSpecItem`
+ + +AddonInstallSpecItem + + + +
+ + +

+ +(Members of `AddonInstallSpecItem` are embedded into this type.) + +

+ +
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the item. +

+ +
+

+AddonInstallSpec + +

+ +

+ +(Appears on:AddonDefaultInstallSpecItem, AddonSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`AddonInstallSpecItem`
+ + +AddonInstallSpecItem + + + +
+ + +

+ +(Members of `AddonInstallSpecItem` are embedded into this type.) + +

+ +
+ +`enabled`
+ +bool + + +
+ +(Optional) + +

+Can be set to true if there are no specific installation attributes to be set. +

+ +
+ +`extras`
+ + +[]AddonInstallExtraItem + + + +
+ +(Optional) + +

+Specifies the installation specifications for extra items. +

+ +
+

+AddonInstallSpecItem + +

+ +

+ +(Appears on:AddonInstallExtraItem, AddonInstallSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of replicas. +

+ +
+ +`persistentVolumeEnabled`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the Persistent Volume is enabled or not. +

+ +
+ +`storageClass`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the storage class. +

+ +
+ +`tolerations`
+ +string + + +
+ +(Optional) + +

+Specifies the tolerations in a JSON array string format. +

+ +
+ +`resources`
+ + +ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resource requirements. +

+ +
+

+AddonPhase +(`string` alias) +

+ +

+ +(Appears on:AddonStatus) + +

+
+ +

+AddonPhase defines addon phases. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Disabled" +

+
+ +
+ +

+"Disabling" +

+
+ +
+ +

+"Enabled" +

+
+ +
+ +

+"Enabling" +

+
+ +
+ +

+"Failed" +

+
+ +
+

+AddonSelectorKey +(`string` alias) +

+ +

+ +(Appears on:SelectorRequirement) + +

+
+ +

+AddonSelectorKey are selector requirement key types. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"KubeGitVersion" +

+
+ +
+ +

+"KubeProvider" +

+
+ +
+ +

+"KubeVersion" +

+
+ +
+

+AddonSpec + +

+ +

+ +(Appears on:Addon) + +

+
+ +

+AddonSpec defines the desired state of an add-on. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`description`
+ +string + + +
+ +(Optional) + +

+Specifies the description of the add-on. +

+ +
+ +`type`
+ + +AddonType + + + +
+ + +

+Defines the type of the add-on. The only valid value is ‘helm’. +

+ +
+ +`version`
+ +string + + +
+ +(Optional) + +

+Indicates the version of the add-on. +

+ +
+ +`provider`
+ +string + + +
+ +(Optional) + +

+Specifies the provider of the add-on. +

+ +
+ +`helm`
+ + +HelmTypeInstallSpec + + + +
+ +(Optional) + +

+Represents the Helm installation specifications. This is only processed +when the type is set to ‘helm’. +

+ +
+ +`defaultInstallValues`
+ + +[]AddonDefaultInstallSpecItem + + + +
+ + +

+Specifies the default installation parameters. +

+ +
+ +`install`
+ + +AddonInstallSpec + + + +
+ +(Optional) + +

+Defines the installation parameters. +

+ +
+ +`installable`
+ + +InstallableSpec + + + +
+ +(Optional) + +

+Represents the installable specifications of the add-on. This includes +the selector and auto-install settings. +

+ +
+ +`cliPlugins`
+ + +[]CliPlugin + + + +
+ +(Optional) + +

+Specifies the CLI plugin installation specifications. +

+ +
+

+AddonStatus + +

+ +

+ +(Appears on:Addon) + +

+
+ +

+AddonStatus defines the observed state of an add-on. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +AddonPhase + + + +
+ + +

+Defines the current installation phase of the add-on. It can take one of +the following values: `Disabled`, `Enabled`, `Failed`, `Enabling`, `Disabling`. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Provides a detailed description of the current state of add-on API installation. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the most recent generation observed for this add-on. It corresponds +to the add-on’s generation, which is updated on mutation by the API Server. +

+ +
+

+AddonType +(`string` alias) +

+ +

+ +(Appears on:AddonSpec) + +

+
+ +

+AddonType defines the addon types. +

+
+ + + + + + + + + + + + + + +
ValueDescription
+ +

+"Helm" +

+
+ +
+

+CliPlugin + +

+ +

+ +(Appears on:AddonSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the plugin. +

+ +
+ +`indexRepository`
+ +string + + +
+ + +

+Defines the index repository of the plugin. +

+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Provides a brief description of the plugin. +

+ +
+

+DataObjectKeySelector + +

+ +

+ +(Appears on:HelmInstallValues) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the name of the object being referred to. +

+ +
+ +`key`
+ +string + + +
+ + +

+Specifies the key to be selected. +

+ +
+

+HelmInstallOptions +(`map[string]string` alias) +

+ +

+ +(Appears on:HelmTypeInstallSpec) + +

+
+
+

+HelmInstallValues + +

+ +

+ +(Appears on:HelmTypeInstallSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`urls`
+ +[]string + + +
+ +(Optional) + +

+Specifies the URL location of the values file. +

+ +
+ +`configMapRefs`
+ + +[]DataObjectKeySelector + + + +
+ +(Optional) + +

+Selects a key from a ConfigMap item list. The value can be +a JSON or YAML string content. Use a key name with “.json”, “.yaml”, or “.yml” +extension to specify a content type. +

+ +
+ +`secretRefs`
+ + +[]DataObjectKeySelector + + + +
+ +(Optional) + +

+Selects a key from a Secrets item list. The value can be +a JSON or YAML string content. Use a key name with “.json”, “.yaml”, or “.yml” +extension to specify a content type. +

+ +
+ +`setValues`
+ +[]string + + +
+ +(Optional) + +

+Values set during Helm installation. Multiple or separate values can be specified with commas (key1=val1,key2=val2). +

+ +
+ +`setJSONValues`
+ +[]string + + +
+ +(Optional) + +

+JSON values set during Helm installation. Multiple or separate values can be specified with commas (key1=jsonval1,key2=jsonval2). +

+ +
+

+HelmJSONValueMapType + +

+ +

+ +(Appears on:HelmValuesMappingItem) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`tolerations`
+ +string + + +
+ +(Optional) + +

+Specifies the toleration mapping key. +

+ +
+

+HelmTypeInstallSpec + +

+ +

+ +(Appears on:AddonSpec) + +

+
+ +

+HelmTypeInstallSpec defines the Helm installation spec. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`chartLocationURL`
+ +string + + +
+ + +

+Specifies the URL location of the Helm Chart. +

+ +
+ +`installOptions`
+ + +HelmInstallOptions + + + +
+ +(Optional) + +

+Defines the options for Helm release installation. +

+ +
+ +`installValues`
+ + +HelmInstallValues + + + +
+ +(Optional) + +

+Defines the set values for Helm release installation. +

+ +
+ +`valuesMapping`
+ + +HelmValuesMapping + + + +
+ +(Optional) + +

+Defines the mapping of add-on normalized resources parameters to Helm values’ keys. +

+ +
+ +`chartsImage`
+ +string + + +
+ +(Optional) + +

+Defines the image of Helm charts. +

+ +
+ +`chartsPathInImage`
+ +string + + +
+ +(Optional) + +

+Defines the path of Helm charts in the image. This path is used to copy +Helm charts from the image to the shared volume. The default path is “/charts”. +

+ +
+

+HelmValueMapType + +

+ +

+ +(Appears on:HelmValuesMappingItem) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicaCount`
+ +string + + +
+ +(Optional) + +

+Defines the key for setting the replica count in the Helm values map. +

+ +
+ +`persistentVolumeEnabled`
+ +string + + +
+ +(Optional) + +

+Indicates whether the persistent volume is enabled in the Helm values map. +

+ +
+ +`storageClass`
+ +string + + +
+ +(Optional) + +

+Specifies the key for setting the storage class in the Helm values map. +

+ +
+

+HelmValuesMapping + +

+ +

+ +(Appears on:HelmTypeInstallSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`HelmValuesMappingItem`
+ + +HelmValuesMappingItem + + + +
+ + +

+ +(Members of `HelmValuesMappingItem` are embedded into this type.) + +

+ +
+ +`extras`
+ + +[]HelmValuesMappingExtraItem + + + +
+ +(Optional) + +

+Helm value mapping items for extra items. +

+ +
+

+HelmValuesMappingExtraItem + +

+ +

+ +(Appears on:HelmValuesMapping) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`HelmValuesMappingItem`
+ + +HelmValuesMappingItem + + + +
+ + +

+ +(Members of `HelmValuesMappingItem` are embedded into this type.) + +

+ +
+ +`name`
+ +string + + +
+ + +

+Name of the item. +

+ +
+

+HelmValuesMappingItem + +

+ +

+ +(Appears on:HelmValuesMapping, HelmValuesMappingExtraItem) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`valueMap`
+ + +HelmValueMapType + + + +
+ +(Optional) + +

+Defines the “key” mapping values. Valid keys include `replicaCount`, +`persistentVolumeEnabled`, and `storageClass`. +Enum values explained: +

+
    +
  • +`replicaCount` sets the replicaCount value mapping key. +
  • +
  • +`persistentVolumeEnabled` sets the persistent volume enabled mapping key. +
  • +
  • +`storageClass` sets the storageClass mapping key. +
  • +
+ +
+ +`jsonMap`
+ + +HelmJSONValueMapType + + + +
+ +(Optional) + +

+Defines the “key” mapping values. The valid key is tolerations. +Enum values explained: +

+
    +
  • +`tolerations` sets the toleration mapping key. +
  • +
+ +
+ +`resources`
+ + +ResourceMappingItem + + + +
+ +(Optional) + +

+Sets resources related mapping keys. +

+ +
+

+InstallableSpec + +

+ +

+ +(Appears on:AddonSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`selectors`
+ + +[]SelectorRequirement + + + +
+ +(Optional) + +

+Specifies the selectors for add-on installation. If multiple selectors are provided, +they must all evaluate to true for the add-on to be installed. +

+ +
+ +`autoInstall`
+ +bool + + +
+ + +

+Indicates whether an add-on should be installed automatically. +

+ +
+

+LineSelectorOperator +(`string` alias) +

+ +

+ +(Appears on:SelectorRequirement) + +

+
+ +

+LineSelectorOperator defines line selector operators. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Contains" +

+
+ +
+ +

+"DoesNotContain" +

+
+ +
+ +

+"DoesNotMatchRegex" +

+
+ +
+ +

+"MatchRegex" +

+
+ +
+

+ResourceMappingItem + +

+ +

+ +(Appears on:HelmValuesMappingItem) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`storage`
+ +string + + +
+ +(Optional) + +

+Specifies the key used for mapping the storage size value. +

+ +
+ +`cpu`
+ + +ResourceReqLimItem + + + +
+ +(Optional) + +

+Specifies the key used for mapping both CPU requests and limits. +

+ +
+ +`memory`
+ + +ResourceReqLimItem + + + +
+ +(Optional) + +

+Specifies the key used for mapping both Memory requests and limits. +

+ +
+

+ResourceReqLimItem + +

+ +

+ +(Appears on:ResourceMappingItem) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`requests`
+ +string + + +
+ +(Optional) + +

+Specifies the mapping key for the request value. +

+ +
+ +`limits`
+ +string + + +
+ +(Optional) + +

+Specifies the mapping key for the limit value. +

+ +
+

+ResourceRequirements + +

+ +

+ +(Appears on:AddonInstallSpecItem) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`limits`
+ + +Kubernetes core/v1.ResourceList + + + +
+ +(Optional) + +

+Limits describes the maximum amount of compute resources allowed. +More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/. +

+ +
+ +`requests`
+ + +Kubernetes core/v1.ResourceList + + + +
+ +(Optional) + +

+Requests describes the minimum amount of compute resources required. +If Requests is omitted for a container, it defaults to Limits if that is explicitly specified; +otherwise, it defaults to an implementation-defined value. +More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/. +

+ +
+

+SelectorRequirement + +

+ +

+ +(Appears on:AddonDefaultInstallSpecItem, InstallableSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`key`
+ + +AddonSelectorKey + + + +
+ + +

+The selector key. Valid values are KubeVersion, KubeGitVersion and KubeProvider. +

+
    +
  • +`KubeVersion` the semver expression of Kubernetes versions, i.e., v1.24. +
  • +
  • +`KubeGitVersion` may contain distro. info., i.e., v1.24.4+eks. +
  • +
  • +`KubeProvider` the Kubernetes provider, i.e., aws, gcp, azure, huaweiCloud, tencentCloud etc. +
  • +
+ +
+ +`operator`
+ + +LineSelectorOperator + + + +
+ + +

+Represents a key’s relationship to a set of values.
+Valid operators are Contains, DoesNotContain, MatchRegex, and DoesNotMatchRegex.
+

+ +

+Possible enum values: +

+
    +
  • +`Contains` line contains a string. +
  • +
  • +`DoesNotContain` line does not contain a string. +
  • +
  • +`MatchRegex` line contains a match to the regular expression. +
  • +
  • +`DoesNotMatchRegex` line does not contain a match to the regular expression. +
  • +
+ +
+ +`values`
+ +[]string + + +
+ +(Optional) + +

+Represents an array of string values. This serves as an “OR” expression to the operator. +

+ +
+
+ +

+ +Generated with `gen-crd-api-reference-docs` + +

\ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/references/api-reference/cluster.mdx b/docs/en/release-1_0_1/user_docs/references/api-reference/cluster.mdx new file mode 100644 index 00000000..3a99c520 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/api-reference/cluster.mdx @@ -0,0 +1,57618 @@ +--- +title: Cluster API Reference +description: Cluster API Reference +keywords: [cluster, api] +sidebar_position: 1 +sidebar_label: Cluster +--- +
+ +

+Packages: +

+ +

apps.kubeblocks.io/v1

+
+
+Resource Types: + +

+Cluster + +

+
+ +

+Cluster offers a unified management interface for a wide variety of database and storage systems: +

+
    +
  • +Relational databases: MySQL, PostgreSQL, MariaDB +
  • +
  • +NoSQL databases: Redis, MongoDB +
  • +
  • +KV stores: ZooKeeper, etcd +
  • +
  • +Analytics systems: ElasticSearch, OpenSearch, ClickHouse, Doris, StarRocks, Solr +
  • +
  • +Message queues: Kafka, Pulsar +
  • +
  • +Distributed SQL: TiDB, OceanBase +
  • +
  • +Vector databases: Qdrant, Milvus, Weaviate +
  • +
  • +Object storage: Minio +
  • +
+ +

+KubeBlocks utilizes an abstraction layer to encapsulate the characteristics of these diverse systems. +A Cluster is composed of multiple Components, each defined by vendors or KubeBlocks Addon developers via ComponentDefinition, +arranged in Directed Acyclic Graph (DAG) topologies. +The topologies, defined in a ClusterDefinition, coordinate reconciliation across Cluster’s lifecycle phases: +Creating, Running, Updating, Stopping, Stopped, Deleting. +Lifecycle management ensures that each Component operates in harmony, executing appropriate actions at each lifecycle stage. +

+ +

+For shared-nothing architecture, the Cluster supports managing multiple shards, +each shard managed by a separate Component, supporting dynamic resharding. +

+ +

+The Cluster object is aimed to maintain the overall integrity and availability of a database cluster, +serves as the central control point, abstracting the complexity of multiple-component management, +and providing a unified interface for cluster-wide operations. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`Cluster` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ClusterSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`clusterDef`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterDefinition to use when creating a Cluster. +

+ +

+This field enables users to create a Cluster based on a specific ClusterDefinition. +Which, in conjunction with the `topology` field, determine: +

+
    +
  • +The Components to be included in the Cluster. +
  • +
  • +The sequences in which the Components are created, updated, and terminated. +
  • +
+ +

+This facilitates multiple-components management with predefined ClusterDefinition. +

+ +

+Users with advanced requirements can bypass this general setting and specify more precise control over +the composition of the Cluster by directly referencing specific ComponentDefinitions for each component +within `componentSpecs[*].componentDef`. +

+ +

+If this field is not provided, each component must be explicitly defined in `componentSpecs[*].componentDef`. +

+ +

+Note: Once set, this field cannot be modified; it is immutable. +

+ +
+ +`topology`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterTopology to be used when creating the Cluster. +

+ +

+This field defines which set of Components, as outlined in the ClusterDefinition, will be used to +construct the Cluster based on the named topology. +The ClusterDefinition may list multiple topologies under `clusterdefinition.spec.topologies[*]`, +each tailored to different use cases or environments. +

+ +

+If `topology` is not specified, the Cluster will use the default topology defined in the ClusterDefinition. +

+ +

+Note: Once set during the Cluster creation, the `topology` field cannot be modified. +It establishes the initial composition and structure of the Cluster and is intended for one-time configuration. +

+ +
+ +`terminationPolicy`
+ + +TerminationPolicyType + + + +
+ + +

+Specifies the behavior when a Cluster is deleted. +It defines how resources, data, and backups associated with a Cluster are managed during termination. +Choose a policy based on the desired level of resource cleanup and data preservation: +

+
    +
  • +`DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. +
  • +
  • +`Delete`: Deletes all runtime resources belong to the Cluster. +
  • +
  • +`WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and +backups in external storage. +This results in complete data removal and should be used cautiously, primarily in non-production environments +to avoid irreversible data loss. +
  • +
+ +

+Warning: Choosing an inappropriate termination policy can result in data loss. +The `WipeOut` policy is particularly risky in production environments due to its irreversible nature. +

+ +
+ +`componentSpecs`
+ + +[]ClusterComponentSpec + + + +
+ +(Optional) + +

+Specifies a list of ClusterComponentSpec objects used to define the individual Components that make up a Cluster. +This field allows for detailed configuration of each Component within the Cluster. +

+ +

+Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`shardings`
+ + +[]ClusterSharding + + + +
+ +(Optional) + +

+Specifies a list of ClusterSharding objects that manage the sharding topology for Cluster Components. +Each ClusterSharding organizes components into shards, with each shard corresponding to a Component. +Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations. +

+ +

+This field supports dynamic resharding by facilitating the addition or removal of shards +through the `shards` field in ClusterSharding. +

+ +

+Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Specifies runtimeClassName for all Pods managed by this Cluster. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Cluster. +

+ +
+ +`services`
+ + +[]ClusterService + + + +
+ +(Optional) + +

+Defines a list of additional Services that are exposed by a Cluster. +This field allows Services of selected Components, either from `componentSpecs` or `shardings` to be exposed, +alongside Services defined with ComponentService. +

+ +

+Services defined here can be referenced by other clusters using the ServiceRefClusterSelector. +

+ +
+ +`backup`
+ + +ClusterBackup + + + +
+ +(Optional) + +

+Specifies the backup configuration of the Cluster. +

+ +
+ +
+ +`status`
+ + +ClusterStatus + + + +
+ + +
+

+ClusterDefinition + +

+
+ +

+ClusterDefinition defines the topology for databases or storage systems, +offering a variety of topological configurations to meet diverse deployment needs and scenarios. +

+ +

+It includes a list of Components and/or Shardings, each linked to a ComponentDefinition or a ShardingDefinition, +which enhances reusability and reduces redundancy. +For example, widely used components such as etcd and Zookeeper can be defined once and reused across multiple ClusterDefinitions, +simplifying the setup of new systems. +

+ +

+Additionally, ClusterDefinition also specifies the sequence of startup, upgrade, and shutdown between Components and/or Shardings, +ensuring a controlled and predictable management of cluster lifecycles. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`ClusterDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ClusterDefinitionSpec + + + +
+ +
+
+ + + + + + + + + +
+ +`topologies`
+ + +[]ClusterTopology + + + +
+ +(Optional) + +

+Topologies defines all possible topologies within the cluster. +

+ +
+ +
+ +`status`
+ + +ClusterDefinitionStatus + + + +
+ + +
+

+Component + +

+
+ +

+Component is a fundamental building block of a Cluster object. +For example, a Redis Cluster can include Components like ‘redis’, ‘sentinel’, and potentially a proxy like ‘twemproxy’. +

+ +

+The Component object is responsible for managing the lifecycle of all replicas within a Cluster component, +It supports a wide range of operations including provisioning, stopping, restarting, termination, upgrading, +configuration changes, vertical and horizontal scaling, failover, switchover, cross-node migration, +scheduling configuration, exposing Services, managing system accounts, enabling/disabling exporter, +and configuring log collection. +

+ +

+Component is an internal sub-object derived from the user-submitted Cluster object. +It is designed primarily to be used by the KubeBlocks controllers, +users are discouraged from modifying Component objects directly and should use them only for monitoring Component statuses. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`Component` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`terminationPolicy`
+ + +TerminationPolicyType + + + +
+ +(Optional) + +

+Specifies the behavior when a Component is deleted. +

+ +
+ +`compDef`
+ +string + + +
+ + +

+Specifies the name of the referenced ComponentDefinition. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +
+ +`serviceRefs`
+ + +[]ServiceRef + + + +
+ +(Optional) + +

+Defines a list of ServiceRef for a Component, enabling access to both external services and +Services provided by other Clusters. +

+ +

+Types of services: +

+
    +
  • +External services: Not managed by KubeBlocks or managed by a different KubeBlocks operator; +Require a ServiceDescriptor for connection details. +
  • +
  • +Services provided by a Cluster: Managed by the same KubeBlocks operator; +identified using Cluster, Component and Service names. +
  • +
+ +

+ServiceRefs with identical `serviceRef.name` in the same Cluster are considered the same. +

+ +

+Example: +

+
+
+serviceRefs:
+  - name: "redis-sentinel"
+    serviceDescriptor:
+      name: "external-redis-sentinel"
+  - name: "postgres-cluster"
+    clusterServiceSelector:
+      cluster: "my-postgres-cluster"
+      service:
+        component: "postgresql"
+
+
+ +

+The example above includes ServiceRefs to an external Redis Sentinel service and a PostgreSQL Cluster. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to add. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resources required by the Component. +It allows defining the CPU, memory requirements and limits for the Component’s containers. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for the Component. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for the Component. +

+ +
+ +`persistentVolumeClaimRetentionPolicy`
+ + +PersistentVolumeClaimRetentionPolicy + + + +
+ +(Optional) + +

+persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent +volume claims created from volumeClaimTemplates. By default, all persistent +volume claims are created as needed and retained until manually deleted. This +policy allows the lifecycle to be altered, for example by deleting persistent +volume claims when their workload is deleted, or when their pod is scaled +down. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+List of volumes to override. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Overrides Services defined in referenced ComponentDefinition. +

+ +
+ +`systemAccounts`
+ + +[]ComponentSystemAccount + + + +
+ +(Optional) + +

+Overrides system accounts defined in referenced ComponentDefinition. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the desired number of replicas in the Component for enhancing availability and durability, or load balancing. +

+ +
+ +`configs`
+ + +[]ClusterComponentConfig + + + +
+ +(Optional) + +

+Specifies the configuration content of a config template. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceAccount required by the running Component. +This ServiceAccount is used to grant necessary permissions for the Component’s Pods to interact +with other Kubernetes resources, such as modifying Pod labels or sending events. +

+ +

+If not specified, KubeBlocks automatically creates a default ServiceAccount named +“kb-{componentdefinition.name}”, bound to a role with rules defined in ComponentDefinition’s +`policyRules` field. If needed (currently this means if any lifecycleAction is enabled), +it will also be bound to a default role named +“kubeblocks-cluster-pod-role”, which is installed together with KubeBlocks. +If multiple components use the same ComponentDefinition, they will share one ServiceAccount. +

+ +

+If the field is not empty, the specified ServiceAccount will be used, and KubeBlocks will not +create a ServiceAccount. But KubeBlocks does create RoleBindings for the specified ServiceAccount. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default Concurrency is 100%. +

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +`StrictInPlace` indicates that only allows in-place upgrades. +Any attempt to modify other fields will be rejected. +
  • +
  • +`PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod. +If that fails, it will fall back to the ReCreate, where pod will be recreated. +Default value is “PreferInPlace” +
  • +
+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Provides fine-grained control over the spec update process of all instances. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`tlsConfig`
+ + +TLSConfig + + + +
+ +(Optional) + +

+Specifies the TLS configuration for the Component, including: +

+
    +
  • +A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) for secure communication. +
  • +
  • +An optional field that specifies the configuration for the TLS certificates issuer when TLS is enabled. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +
  • +
+ +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Allows for the customization of configuration values for each instance within a Component. +An Instance represents a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps). +While instances typically share a common configuration as defined in the ClusterComponentSpec, +they can require unique settings in various scenarios: +

+ +

+For example: +- A database Component might require different resource allocations for primary and secondary instances, + with primaries needing more resources. +- During a rolling upgrade, a Component may first update the image for one or a few instances, +and then update the remaining instances after verifying that the updated instances are functioning correctly. +

+ +

+InstanceTemplate allows for specifying these unique configurations per instance. +Each instance’s name is constructed using the pattern: $(component.name)-$(template.name)-$(ordinal), +starting with an ordinal of 0. +It is crucial to maintain unique names for each InstanceTemplate to avoid conflicts. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the Component. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated Pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the Cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Defines runtimeClassName for all Pods managed by this Component. +

+ +
+ +`disableExporter`
+ +bool + + +
+ +(Optional) + +

+Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will not be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`stop`
+ +bool + + +
+ +(Optional) + +

+Stop the Component. +If set, all the computing resources will be released. +

+ +
+ +`sidecars`
+ + +[]Sidecar + + + +
+ +(Optional) + +

+Specifies the sidecars to be injected into the Component. +

+ +
+ +
+ +`status`
+ + +ComponentStatus + + + +
+ + +
+

+ComponentDefinition + +

+
+ +

+ComponentDefinition serves as a reusable blueprint for creating Components, +encapsulating essential static settings such as Component description, +Pod templates, configuration file templates, scripts, parameter lists, +injected environment variables and their sources, and event handlers. +ComponentDefinition works in conjunction with dynamic settings from the ClusterComponentSpec, +to instantiate Components during Cluster creation. +

+ +

+Key aspects that can be defined in a ComponentDefinition include: +

+
    +
  • +PodSpec template: Specifies the PodSpec template used by the Component. +
  • +
  • +Configuration templates: Specify the configuration file templates required by the Component. +
  • +
  • +Scripts: Provide the necessary scripts for Component management and operations. +
  • +
  • +Storage volumes: Specify the storage volumes and their configurations for the Component. +
  • +
  • +Pod roles: Outlines various roles of Pods within the Component along with their capabilities. +
  • +
  • +Exposed Kubernetes Services: Specify the Services that need to be exposed by the Component. +
  • +
  • +System accounts: Define the system accounts required for the Component. +
  • +
  • +Monitoring and logging: Configure the exporter and logging settings for the Component. +
  • +
+ +

+ComponentDefinitions also enable defining reactive behaviors of the Component in response to events, +such as member join/leave, Component addition/deletion, role changes, switch over, and more. +This allows for automatic event handling, thus encapsulating complex behaviors within the Component. +

+ +

+Referencing a ComponentDefinition when creating individual Components ensures inheritance of predefined configurations, +promoting reusability and consistency across different deployments and cluster topologies. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`ComponentDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentDefinitionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`provider`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Component provider, typically the vendor or developer name. +It identifies the entity responsible for creating and maintaining the Component. +

+ +

+When specifying the provider name, consider the following guidelines: +

+
    +
  • +Keep the name concise and relevant to the Component. +
  • +
  • +Use a consistent naming convention across Components from the same provider. +
  • +
  • +Avoid using trademarked or copyrighted names without proper permission. +
  • +
+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Provides a brief and concise explanation of the Component’s purpose, functionality, and any relevant details. +It serves as a quick reference for users to understand the Component’s role and characteristics. +

+ +
+ +`serviceKind`
+ +string + + +
+ +(Optional) + +

+Defines the type of well-known service protocol that the Component provides. +It specifies the standard or widely recognized protocol used by the Component to offer its Services. +

+ +

+The `serviceKind` field allows users to quickly identify the type of Service provided by the Component +based on common protocols or service types. This information helps in understanding the compatibility, +interoperability, and usage of the Component within a system. +

+ +

+Some examples of well-known service protocols include: +

+
    +
  • +“MySQL”: Indicates that the Component provides a MySQL database service. +
  • +
  • +“PostgreSQL”: Indicates that the Component offers a PostgreSQL database service. +
  • +
  • +“Redis”: Signifies that the Component functions as a Redis key-value store. +
  • +
  • +“ETCD”: Denotes that the Component serves as an ETCD distributed key-value store. +
  • +
+ +

+The `serviceKind` value is case-insensitive, allowing for flexibility in specifying the protocol name. +

+ +

+When specifying the `serviceKind`, consider the following guidelines: +

+
    +
  • +Use well-established and widely recognized protocol names or service types. +
  • +
  • +Ensure that the `serviceKind` accurately represents the primary service type offered by the Component. +
  • +
  • +If the Component provides multiple services, choose the most prominent or commonly used protocol. +
  • +
  • +Limit the `serviceKind` to a maximum of 32 characters for conciseness and readability. +
  • +
+ +

+Note: The `serviceKind` field is optional and can be left empty if the Component does not fit into a well-known +service category or if the protocol is not widely recognized. It is primarily used to convey information about +the Component’s service type to users and facilitate discovery and integration. +

+ +

+The `serviceKind` field is immutable and cannot be updated. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+Specifies the version of the Service provided by the Component. +It follows the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +

+The Semantic Versioning specification defines a version number format of X.Y.Z (MAJOR.MINOR.PATCH), where: +

+
    +
  • +X represents the major version and indicates incompatible API changes. +
  • +
  • +Y represents the minor version and indicates added functionality in a backward-compatible manner. +
  • +
  • +Z represents the patch version and indicates backward-compatible bug fixes. +
  • +
+ +

+Additional labels for pre-release and build metadata are available as extensions to the X.Y.Z format: +

+
    +
  • +Use pre-release labels (e.g., -alpha, -beta) for versions that are not yet stable or ready for production use. +
  • +
  • +Use build metadata (e.g., +build.1) for additional version information if needed. +
  • +
+ +

+Examples of valid ServiceVersion values: +

+
    +
  • +“1.0.0” +
  • +
  • +“2.3.1” +
  • +
  • +“3.0.0-alpha.1” +
  • +
  • +“4.5.2+build.1” +
  • +
+ +

+The `serviceVersion` field is immutable and cannot be updated. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static labels that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If a label key in the `labels` field conflicts with any system labels or user-specified labels, +it will be silently ignored to avoid overriding higher-priority labels. +

+ +

+This field is immutable. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static annotations that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If an annotation key in the `annotations` field conflicts with any system annotations +or user-specified annotations, it will be silently ignored to avoid overriding higher-priority annotations. +

+ +

+This field is immutable. +

+ +
+ +`runtime`
+ + +Kubernetes core/v1.PodSpec + + + +
+ + +

+Specifies the PodSpec template used in the Component. +It includes the following elements: +

+
    +
  • +Init containers +
  • +
  • +Containers +
      +
    • +Image +
    • +
    • +Commands +
    • +
    • +Args +
    • +
    • +Envs +
    • +
    • +Mounts +
    • +
    • +Ports +
    • +
    • +Security context +
    • +
    • +Probes +
    • +
    • +Lifecycle +
    • +
    +
  • +
  • +Volumes +
  • +
+ +

+This field is intended to define static settings that remain consistent across all instantiated Components. +Dynamic settings such as CPU and memory resource limits, as well as scheduling settings (affinity, +toleration, priority), may vary among different instantiated Components. +They should be specified in the `cluster.spec.componentSpecs` (ClusterComponentSpec). +

+ +

+Specific instances of a Component may override settings defined here, such as using a different container image +or modifying environment variable values. +These instance-specific overrides can be specified in `cluster.spec.componentSpecs[*].instances`. +

+ +

+This field is immutable and cannot be updated once set. +

+ +
+ +`vars`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Defines variables which are determined after Cluster instantiation and reflect +dynamic or runtime attributes of instantiated Clusters. +These variables serve as placeholders for setting environment variables in Pods and Actions, +or for rendering configuration and script templates before actual values are finalized. +

+ +

+These variables are placed in front of the environment variables declared in the Pod if used as +environment variables. +

+ +

+Variable values can be sourced from: +

+
    +
  • +ConfigMap: Select and extract a value from a specific key within a ConfigMap. +
  • +
  • +Secret: Select and extract a value from a specific key within a Secret. +
  • +
  • +HostNetwork: Retrieves values (including ports) from host-network resources. +
  • +
  • +Service: Retrieves values (including address, port, NodePort) from a selected Service. +Intended to obtain the address of a ComponentService within the same Cluster. +
  • +
  • +Credential: Retrieves account name and password from a SystemAccount variable. +
  • +
  • +ServiceRef: Retrieves address, port, account name and password from a selected ServiceRefDeclaration. +Designed to obtain the address bound to a ServiceRef, such as a ClusterService or +ComponentService of another cluster or an external service. +
  • +
  • +Component: Retrieves values from a selected Component, including replicas and instance name list. +
  • +
+ +

+This field is immutable. +

+ +
+ +`volumes`
+ + +[]ComponentVolume + + + +
+ +(Optional) + +

+Defines the volumes used by the Component and some static attributes of the volumes. +After defining the volumes here, user can reference them in the +`cluster.spec.componentSpecs[*].volumeClaimTemplates` field to configure dynamic properties such as +volume capacity and storage class. +

+ +

+This field allows you to specify the following: +

+
    +
  • +Snapshot behavior: Determines whether a snapshot of the volume should be taken when performing +a snapshot backup of the Component. +
  • +
  • +Disk high watermark: Sets the high watermark for the volume’s disk usage. +When the disk usage reaches the specified threshold, it triggers an alert or action. +
  • +
+ +

+By configuring these volume behaviors, you can control how the volumes are managed and monitored within the Component. +

+ +

+This field is immutable. +

+ +
+ +`hostNetwork`
+ + +HostNetwork + + + +
+ +(Optional) + +

+Specifies the host network configuration for the Component. +

+ +

+When `hostNetwork` option is enabled, the Pods share the host’s network namespace and can directly access +the host’s network interfaces. +This means that if multiple Pods need to use the same port, they cannot run on the same host simultaneously +due to port conflicts. +

+ +

+The DNSPolicy field in the Pod spec determines how containers within the Pod perform DNS resolution. +When using hostNetwork, the operator will set the DNSPolicy to ‘ClusterFirstWithHostNet’. +With this policy, DNS queries will first go through the K8s cluster’s DNS service. +If the query fails, it will fall back to the host’s DNS settings. +

+ +

+If set, the DNS policy will be automatically set to “ClusterFirstWithHostNet”. +

+ +

+This field is immutable. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Defines additional Services to expose the Component’s endpoints. +

+ +

+A default headless Service, named `{cluster.name}-{component.name}-headless`, is automatically created +for internal Cluster communication. +

+ +

+This field enables customization of additional Services to expose the Component’s endpoints to +other Components within the same or different Clusters, and to external applications. +Each Service entry in this list can include properties such as ports, type, and selectors. +

+
    +
  • +For intra-Cluster access, Components can reference Services using variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceVarRef`. +
  • +
  • +For inter-Cluster access, reference Services use variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceRefVarRef`, +and bind Services at Cluster creation time with `clusterComponentSpec.ServiceRef[*].clusterServiceSelector`. +
  • +
+ +

+This field is immutable. +

+ +
+ +`configs`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies the config file templates and volume mount parameters used by the Component. +

+ +

+This field specifies a list of templates that will be rendered into Component containers’ config files. +Each template is represented as a ConfigMap and may contain multiple config files, with each file being a key in the ConfigMap. +

+ +

+This field is immutable. +

+ +
+ +`scripts`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies groups of scripts, each provided via a ConfigMap, to be mounted as volumes in the container. +These scripts can be executed during container startup or via specific actions. +

+ +

+This field is immutable. +

+ +
+ +`logConfigs`
+ + +[]LogConfig + + + +
+ +(Optional) + +

+Defines the types of logs generated by instances of the Component and their corresponding file paths. +These logs can be collected for further analysis and monitoring. +

+ +

+The `logConfigs` field is an optional list of LogConfig objects, where each object represents +a specific log type and its configuration. +It allows you to specify multiple log types and their respective file paths for the Component. +

+ +

+Examples: +

+
+
+ logConfigs:
+ - filePathPattern: /data/mysql/log/mysqld-error.log
+   name: error
+ - filePathPattern: /data/mysql/log/mysqld.log
+   name: general
+ - filePathPattern: /data/mysql/log/mysqld-slowquery.log
+   name: slow
+
+
+ +

+This field is immutable. +

+ +
+ +`systemAccounts`
+ + +[]SystemAccount + + + +
+ +(Optional) + +

+An array of `SystemAccount` objects that define the system accounts needed +for the management operations of the Component. +

+ +

+Each `SystemAccount` includes: +

+
    +
  • +Account name. +
  • +
  • +The SQL statement template: Used to create the system account. +
  • +
  • +Password Source: Either generated based on certain rules or retrieved from a Secret. +
  • +
+ +

+Use cases for system accounts typically involve tasks like system initialization, backups, monitoring, +health checks, replication, and other system-level operations. +

+ +

+System accounts are distinct from user accounts, although both are database accounts. +

+
    +
  • +System Accounts: Created during Cluster setup by the KubeBlocks operator, +these accounts have higher privileges for system management and are fully managed +through a declarative API by the operator. +
  • +
  • +User Accounts: Managed by users or administrator. +User account permissions should follow the principle of least privilege, +granting only the necessary access rights to complete their required tasks. +
  • +
+ +

+This field is immutable. +

+ +
+ +`tls`
+ + +TLS + + + +
+ +(Optional) + +

+Specifies the TLS configuration for the Component. +

+ +

+This field is immutable. +

+ +
+ +`replicasLimit`
+ + +ReplicasLimit + + + +
+ +(Optional) + +

+Defines the upper limit of the number of replicas supported by the Component. +

+ +

+It defines the maximum number of replicas that can be created for the Component. +This field allows you to set a limit on the scalability of the Component, preventing it from exceeding a certain number of replicas. +

+ +

+This field is immutable. +

+ +
+ +`available`
+ + +ComponentAvailable + + + +
+ +(Optional) + +

+Specifies the strategies for determining the available status of the Component. +

+ +

+This field is immutable. +

+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+Enumerate all possible roles assigned to each replica of the Component, influencing its behavior. +

+ +

+A replica can have zero or one role. +KubeBlocks operator determines the role of each replica by invoking the `lifecycleActions.roleProbe` method. +This action returns the role for each replica, and the returned role must be predefined here. +

+ +

+The roles assigned to a replica can influence various aspects of the Component’s behavior, such as: +

+
    +
  • +Service selection: The Component’s exposed Services may target replicas based on their roles using `roleSelector`. +
  • +
  • +Update order: The roles can determine the order in which replicas are updated during a Component update. +For instance, replicas with a “follower” role can be updated first, while the replica with the “leader” +role is updated last. This helps minimize the number of leader changes during the update process. +
  • +
+ +

+This field is immutable. +

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+`minReadySeconds` is the minimum duration in seconds that a new Pod should remain in the ready +state without any of its containers crashing to be considered available. +This ensures the Pod’s stability and readiness to serve requests. +

+ +

+A default value of 0 seconds means the Pod is considered available as soon as it enters the ready state. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the concurrency strategy for updating multiple instances of the Component. +Available strategies: +

+
    +
  • +`Serial`: Updates replicas one at a time, ensuring minimal downtime by waiting for each replica to become ready +before updating the next. +
  • +
  • +`Parallel`: Updates all replicas simultaneously, optimizing for speed but potentially reducing availability +during the update. +
  • +
  • +`BestEffortParallel`: Updates replicas concurrently with a limit on simultaneous updates to ensure a minimum +number of operational replicas for maintaining quorum. + For example, in a 5-replica component, updating a maximum of 2 replicas simultaneously keeps +at least 3 operational for quorum. +
  • +
+ +

+This field is immutable and defaults to ‘Serial’. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+InstanceSet controls the creation of pods during initial scale up, replacement of pods on nodes, and scaling down. +

+
    +
  • +`OrderedReady`: Creates pods in increasing order (pod-0, then pod-1, etc). The controller waits until each pod +is ready before continuing. Pods are removed in reverse order when scaling down. +
  • +
  • +`Parallel`: Creates pods in parallel to match the desired scale without waiting. All pods are deleted at once +when scaling down. +
  • +
+ +
+ +`policyRules`
+ + +[]Kubernetes rbac/v1.PolicyRule + + + +
+ +(Optional) + +

+Defines the namespaced policy rules required by the Component. +

+ +

+The `policyRules` field is an array of `rbacv1.PolicyRule` objects that define the policy rules +needed by the Component to operate within a namespace. +These policy rules determine the permissions and verbs the Component is allowed to perform on +Kubernetes resources within the namespace. +

+ +

+The purpose of this field is to automatically generate the necessary RBAC roles +for the Component based on the specified policy rules. +This ensures that the Pods in the Component have appropriate permissions to function. +

+ +

+To prevent privilege escalation, only permissions already owned by KubeBlocks can be added here. +

+ +

+This field is immutable. +

+ +
+ +`lifecycleActions`
+ + +ComponentLifecycleActions + + + +
+ +(Optional) + +

+Defines a set of hooks and procedures that customize the behavior of a Component throughout its lifecycle. +Actions are triggered at specific lifecycle stages: +

+
    +
  • +`postProvision`: Defines the hook to be executed after the creation of a Component, +with `preCondition` specifying when the action should be fired relative to the Component’s lifecycle stages: +`Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`. +
  • +
  • +`preTerminate`: Defines the hook to be executed before terminating a Component. +
  • +
  • +`roleProbe`: Defines the procedure which is invoked regularly to assess the role of replicas. +
  • +
  • +`switchover`: Defines the procedure for a controlled transition of a role to a new replica. +This approach aims to minimize downtime and maintain availability in systems with a leader-follower topology, +such as before planned maintenance or upgrades on the current leader node. +
  • +
  • +`memberJoin`: Defines the procedure to add a new replica to the replication group. +
  • +
  • +`memberLeave`: Defines the method to remove a replica from the replication group. +
  • +
  • +`readOnly`: Defines the procedure to switch a replica into the read-only state. +
  • +
+`readWrite`: Defines the procedure to transition a replica from the read-only state back to the read-write state. +
  • +
  • +`dataDump`: Defines the procedure to export the data from a replica. +
  • +
  • +`dataLoad`: Defines the procedure to import data into a replica. +
  • +
+`reconfigure`: Defines the procedure that updates a replica with a new configuration file. +
  • +
  • +`accountProvision`: Defines the procedure to generate a new database account. +
  • +
+ +

+This field is immutable. +

+ +
+ +`serviceRefDeclarations`
+ + +[]ServiceRefDeclaration + + + +
+ +(Optional) + +

+Lists external service dependencies of the Component, including services from other Clusters or outside the K8s environment. +

+ +

+This field is immutable. +

+ +
+ +`exporter`
+ + +Exporter + + + +
+ +(Optional) + +

+Defines the built-in metrics exporter container. +

+ +
+ +
+ +`status`
+ + +ComponentDefinitionStatus + + + +
+ + +
+

+ComponentVersion + +

+
+ +

+ComponentVersion is the Schema for the componentversions API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`ComponentVersion` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentVersionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + +
+ +`compatibilityRules`
+ + +[]ComponentVersionCompatibilityRule + + + +
+ + +

+CompatibilityRules defines compatibility rules between sets of component definitions and releases. +

+ +
+ +`releases`
+ + +[]ComponentVersionRelease + + + +
+ + +

+Releases represents different releases of component instances within this ComponentVersion. +

+ +
+ +
+ +`status`
+ + +ComponentVersionStatus + + + +
+ + +
+

+ServiceDescriptor + +

+
+ +

+ServiceDescriptor describes a service provided by external sources. +It contains the necessary details such as the service’s address and connection credentials. +To enable a Cluster to access this service, the ServiceDescriptor’s name should be specified +in the Cluster configuration under `clusterComponent.serviceRefs[*].serviceDescriptor`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`ServiceDescriptor` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ServiceDescriptorSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`serviceKind`
+ +string + + +
+ + +

+Describes the type of database service provided by the external service. +For example, “mysql”, “redis”, “mongodb”. +This field categorizes databases by their functionality, protocol and compatibility, facilitating appropriate +service integration based on their unique capabilities. +

+ +

+This field is case-insensitive. +

+ +

+It also supports abbreviations for some well-known databases: +- “pg”, “pgsql”, “postgres”, “postgresql”: PostgreSQL service +- “zk”, “zookeeper”: ZooKeeper service +- “es”, “elasticsearch”: Elasticsearch service +- “mongo”, “mongodb”: MongoDB service +- “ch”, “clickhouse”: ClickHouse service +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+Describes the version of the service provided by the external service. +This is crucial for ensuring compatibility between different components of the system, +as different versions of a service may have varying features. +

+ +
+ +`endpoint`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the endpoint of the external service. +

+ +

+If the service is exposed via a cluster, the endpoint will be provided in the format of `host:port`. +

+ +
+ +`host`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the service or IP address of the external service. +

+ +
+ +`port`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the port of the external service. +

+ +
+ +`podFQDNs`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the pod FQDNs of the external service. +

+ +
+ +`auth`
+ + +ConnectionCredentialAuth + + + +
+ +(Optional) + +

+Specifies the authentication credentials required for accessing an external service. +

+ +
+ +
+ +`status`
+ + +ServiceDescriptorStatus + + + +
+ + +
+

+ShardingDefinition + +

+
+ +

+ShardingDefinition is the Schema for the shardingdefinitions API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`ShardingDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ShardingDefinitionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`template`
+ + +ShardingTemplate + + + +
+ + +

+This field is immutable. +

+ +
+ +`shardsLimit`
+ + +ShardsLimit + + + +
+ +(Optional) + +

+Defines the upper limit of the number of shards supported by the sharding. +

+ +

+This field is immutable. +

+ +
+ +`provisionStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the strategy for provisioning shards of the sharding. Only `Serial` and `Parallel` are supported. +

+ +

+This field is immutable. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the strategy for updating shards of the sharding. Only `Serial` and `Parallel` are supported. +

+ +

+This field is immutable. +

+ +
+ +`lifecycleActions`
+ + +ShardingLifecycleActions + + + +
+ +(Optional) + +

+Defines a set of hooks and procedures that customize the behavior of a sharding throughout its lifecycle. +

+ +

+This field is immutable. +

+ +
+ +`systemAccounts`
+ + +[]ShardingSystemAccount + + + +
+ +(Optional) + +

+Defines the system accounts for the sharding. +

+ +

+This field is immutable. +

+ +
+ +`tls`
+ + +ShardingTLS + + + +
+ +(Optional) + +

+Defines the TLS for the sharding. +

+ +

+This field is immutable. +

+ +
+ +
+ +`status`
+ + +ShardingDefinitionStatus + + + +
+ + +
+

+SidecarDefinition + +

+
+ +

+SidecarDefinition is the Schema for the sidecardefinitions API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`SidecarDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +SidecarDefinitionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the sidecar. +

+ +
+ +`owner`
+ +string + + +
+ + +

+Specifies the component definition that the sidecar belongs to. +

+ +

+For a specific cluster object, if there are any components provided by the component definition of @owner, +the sidecar will be created and injected into the components which are provided by +the component definition of @selectors automatically. +

+ +

+This field is immutable. +

+ +
+ +`selectors`
+ +[]string + + +
+ + +

+Specifies the component definition of the components that the sidecar runs along with. +

+ +

+This field is immutable. +

+ +
+ +`containers`
+ + +[]Kubernetes core/v1.Container + + + +
+ + +

+List of containers for the sidecar. +

+ +

+Cannot be updated. +

+ +
+ +`vars`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Defines variables which are needed by the sidecar. +

+ +

+This field is immutable. +

+ +
+ +`configs`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies the configuration file templates used by the Sidecar. +

+ +

+This field is immutable. +

+ +
+ +`scripts`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies the scripts used by the Sidecar. +

+ +

+This field is immutable. +

+ +
+ +
+ +`status`
+ + +SidecarDefinitionStatus + + + +
+ + +
+

+Action + +

+ +

+ +(Appears on:ClusterComponentConfig, ComponentLifecycleActions, Probe, ShardingLifecycleActions, ConfigTemplate, MembershipReconfiguration) + +

+
+ +

+Action defines a customizable hook or procedure tailored for different database engines, +designed to be invoked at predetermined points within the lifecycle of a Component instance. +It provides a modular and extensible way to customize a Component’s behavior through the execution of defined actions. +

+ +

+Available Action triggers include: +

+
    +
  • +`postProvision`: Defines the hook to be executed after the creation of a Component, +with `preCondition` specifying when the action should be fired relative to the Component’s lifecycle stages: +`Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`. +
  • +
  • +`preTerminate`: Defines the hook to be executed before terminating a Component. +
  • +
  • +`roleProbe`: Defines the procedure which is invoked regularly to assess the role of replicas. +
  • +
  • +`switchover`: Defines the procedure for a controlled transition of a role to a new replica. +
  • +
  • +`memberJoin`: Defines the procedure to add a new replica to the replication group. +
  • +
  • +`memberLeave`: Defines the method to remove a replica from the replication group. +
  • +
  • +`readOnly`: Defines the procedure to switch a replica into the read-only state. +
  • +
  • +`readWrite`: Defines the procedure to transition a replica from the read-only state back to the read-write state. +
  • +
  • +`dataDump`: Defines the procedure to export the data from a replica. +
  • +
  • +`dataLoad`: Defines the procedure to import data into a replica. +
  • +
  • +`reconfigure`: Defines the procedure that updates a replica with new configuration. +
  • +
  • +`accountProvision`: Defines the procedure to generate a new database account. +
  • +
+ +

+Actions can be executed in different ways: +

+
    +
  • +ExecAction: Executes a command inside a container. +A set of predefined environment variables are available and can be leveraged within the `exec.command` +to access context information such as details about pods, components, the overall cluster state, +or database connection credentials. +These variables provide a dynamic and context-aware mechanism for script execution. +
  • +
  • +HTTPAction: Performs an HTTP request. +HTTPAction is to be implemented in future version. +
  • +
  • +GRPCAction: In future version, Actions will support initiating gRPC calls. +This allows developers to implement Actions using plugins written in programming language like Go, +providing greater flexibility and extensibility. +
  • +
+ +

+An action is considered successful on returning 0, or an HTTP 200 status for HTTP(s) Actions. +Any other return value or HTTP status code indicates failure, +and the action may be retried based on the configured retry policy. +

+
    +
  • +If an action exceeds the specified timeout duration, it will be terminated, and the action is considered failed. +
  • +
  • +If an action produces any data as output, it should be written to stdout, +or included in the HTTP response payload for HTTP(s) actions. +
  • +
  • +If an action encounters any errors, error messages should be written to stderr, +or detailed in the HTTP response with the appropriate non-200 status code. +
  • +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`exec`
+ + +ExecAction + + + +
+ +(Optional) + +

+Defines the command to run. +

+ +

+This field cannot be updated. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum duration in seconds that the Action is allowed to run. +

+ +

+If the Action does not complete within this time frame, it will be terminated. +

+ +

+This field cannot be updated. +

+ +
+ +`retryPolicy`
+ + +RetryPolicy + + + +
+ +(Optional) + +

+Defines the strategy to be taken when retrying the Action after a failure. +

+ +

+It specifies the conditions under which the Action should be retried and the limits to apply, +such as the maximum number of retries and backoff strategy. +

+ +

+This field cannot be updated. +

+ +
+ +`preCondition`
+ + +PreConditionType + + + +
+ +(Optional) + +

+Specifies the state that the cluster must reach before the Action is executed. +Currently, this is only applicable to the `postProvision` action. +

+ +

+The conditions are as follows: +

+
    +
  • +`Immediately`: Executed right after the Component object is created. +The readiness of the Component and its resources is not guaranteed at this stage. +
  • +
  • +`RuntimeReady`: The Action is triggered after the Component object has been created and all associated +runtime resources (e.g. Pods) are in a ready state. +
  • +
  • +`ComponentReady`: The Action is triggered after the Component itself is in a ready state. +This process does not affect the readiness state of the Component or the Cluster. +
  • +
  • +`ClusterReady`: The Action is executed after the Cluster is in a ready state. +This execution does not alter the Component or the Cluster’s state of readiness. +
  • +
+ +

+This field cannot be updated. +

+ +
+

+ActionAssertion + +

+ +

+ +(Appears on:ComponentAvailableProbeAssertion) + +

+
+ +

+ActionAssertion defines the custom assertions for evaluating the success or failure of an action. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`succeed`
+ +bool + + +
+ +(Optional) + +

+Whether the action should succeed or fail. +

+ +

+This field is immutable once set. +

+ +
+ +`stdout`
+ + +ActionOutputMatcher + + + +
+ +(Optional) + +

+Specifies the stdout matcher for the action. +

+ +

+This field is immutable once set. +

+ +
+ +`stderr`
+ + +ActionOutputMatcher + + + +
+ +(Optional) + +

+Specifies the stderr matcher for the action. +

+ +

+This field is immutable once set. +

+ +
+

+ActionOutputMatcher + +

+ +

+ +(Appears on:ActionAssertion) + +

+
+ +

+ActionOutputMatcher defines the matcher for the output of an action. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`equalTo`
+ +string + + +
+ +(Optional) + +

+The output of the action should be equal to the specified value. +

+ +

+This field is immutable once set. +

+ +
+ +`contains`
+ +string + + +
+ +(Optional) + +

+The output of the action should contain the specified value. +

+ +

+This field is immutable once set. +

+ +
+

+ClusterBackup + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether automated backup is enabled for the Cluster. +

+ +
+ +`retentionPeriod`
+ +github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1.RetentionPeriod + + +
+ +(Optional) + +

+Determines the duration to retain backups. Backups older than this period are automatically removed. +

+ +

+For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. +Sample duration format: +

+
    +
  • +years: 2y +
  • +
  • +months: 6mo +
  • +
  • +days: 30d +
  • +
  • +hours: 12h +
  • +
  • +minutes: 30m +
  • +
+ +

+You can also combine the above durations. For example: 30d12h30m. +Default value is 7d. +

+ +
+ +`method`
+ +string + + +
+ + +

+Specifies the backup method to use, as defined in backupPolicy. +

+ +
+ +`cronExpression`
+ +string + + +
+ +(Optional) + +

+The cron expression for the schedule. The timezone is in UTC. See https://en.wikipedia.org/wiki/Cron. +

+ +
+ +`startingDeadlineMinutes`
+ +int64 + + +
+ +(Optional) + +

+Specifies the maximum time in minutes that the system will wait to start a missed backup job. +If the scheduled backup time is missed for any reason, the backup job must start within this deadline. +Values must be between 0 (immediate execution) and 1440 (one day). +

+ +
+ +`repoName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the backupRepo. If not set, the default backupRepo will be used. +

+ +
+ +`pitrEnabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to enable point-in-time recovery. +

+ +
+ +`continuousMethod`
+ +string + + +
+ +(Optional) + +

+Specifies the backup method to use; if not set, the first continuous method is used. +

+ +
+ +`incrementalBackupEnabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to enable incremental backup. +

+ +
+ +`incrementalCronExpression`
+ +string + + +
+ +(Optional) + +

+The cron expression for the incremental backup schedule. The timezone is in UTC. See https://en.wikipedia.org/wiki/Cron. +

+ +
+

+ClusterComponentConfig + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+ +

+ClusterComponentConfig represents a configuration for a component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+The name of the config. +

+ +
+ +`variables`
+ +map[string]string + + +
+ +(Optional) + +

+Variables are key-value pairs for dynamic configuration values that can be provided by the user. +

+ +
+ +`ClusterComponentConfigSource`
+ + +ClusterComponentConfigSource + + + +
+ + +

+ +(Members of `ClusterComponentConfigSource` are embedded into this type.) + +

+ +

+The external source for the configuration. +

+ +
+ +`reconfigure`
+ + +Action + + + +
+ +(Optional) + +

+The custom reconfigure action to reload the service configuration whenever changes to this config are detected. +

+ +

+The container executing this action has access to following variables: +

+
    +
  • +KB_CONFIG_FILES_CREATED: file1,file2… +
  • +
  • +KB_CONFIG_FILES_REMOVED: file1,file2… +
  • +
  • +KB_CONFIG_FILES_UPDATED: file1:checksum1,file2:checksum2… +
  • +
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`externalManaged`
+ +bool + + +
+ +(Optional) + +

+ExternalManaged indicates whether the configuration is managed by an external system. +When set to true, the controller will use the user-provided template and reconfigure action, +ignoring the default template and update behavior. +

+ +
+

+ClusterComponentConfigSource + +

+ +

+ +(Appears on:ClusterComponentConfig) + +

+
+ +

+ClusterComponentConfigSource represents the source of a configuration for a component. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`configMap`
+ + +Kubernetes core/v1.ConfigMapVolumeSource + + + +
+ +(Optional) + +

+ConfigMap source for the config. +

+ +
+

+ClusterComponentService + +

+ +

+ +(Appears on:ClusterComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+References the ComponentService name defined in the `componentDefinition.spec.services[*].name`. +

+ +
+ +`serviceType`
+ + +Kubernetes core/v1.ServiceType + + + +
+ +(Optional) + +

+Determines how the Service is exposed. Valid options are `ClusterIP`, `NodePort`, and `LoadBalancer`. +

+
    +
  • +`ClusterIP` allocates a Cluster-internal IP address for load-balancing to endpoints. +Endpoints are determined by the selector or if that is not specified, +they are determined by manual construction of an Endpoints object or EndpointSlice objects. +
  • +
  • +`NodePort` builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the ClusterIP. +
  • +
  • +`LoadBalancer` builds on NodePort and creates an external load-balancer (if supported in the current cloud) +which routes to the same endpoints as the ClusterIP. +
  • +
+ +

+Note: although K8s Service type allows the ‘ExternalName’ type, it is not a valid option for ClusterComponentService. +

+ +

+For more info, see: +https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+If ServiceType is LoadBalancer, cloud provider related parameters can be put here. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer. +

+ +
+ +`podService`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to generate individual Services for each Pod. +If set to true, a separate Service will be created for each Pod in the Cluster. +

+ +
+

+ClusterComponentSpec + +

+ +

+ +(Appears on:ClusterSharding, ClusterSpec) + +

+
+ +

+ClusterComponentSpec defines the specification of a Component within a Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+Specifies the Component’s name. +It’s part of the Service DNS name and must comply with the IANA service naming rule. +The name is optional when ClusterComponentSpec is used as a template (e.g., in `clusterSharding`), +but required otherwise. +

+ +
+ +`componentDef`
+ +string + + +
+ +(Optional) + +

+Specifies the ComponentDefinition custom resource (CR) that defines the Component’s characteristics and behavior. +

+ +

+Supports three different ways to specify the ComponentDefinition: +

+
    +
  • +the regular expression - recommended +
  • +
  • +the full name - recommended +
  • +
  • +the name prefix +
  • +
+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +If no version is specified, the latest available version will be used. +

+ +
+ +`serviceRefs`
+ + +[]ServiceRef + + + +
+ +(Optional) + +

+Defines a list of ServiceRef for a Component, enabling access to both external services and +Services provided by other Clusters. +

+ +

+Types of services: +

+
    +
  • +External services: Not managed by KubeBlocks or managed by a different KubeBlocks operator; +Require a ServiceDescriptor for connection details. +
  • +
  • +Services provided by a Cluster: Managed by the same KubeBlocks operator; +identified using Cluster, Component and Service names. +
  • +
+ +

+ServiceRefs with identical `serviceRef.name` in the same Cluster are considered the same. +

+ +

+Example: +

+
+
+serviceRefs:
+  - name: "redis-sentinel"
+    serviceDescriptor:
+      name: "external-redis-sentinel"
+  - name: "postgres-cluster"
+    clusterServiceSelector:
+      cluster: "my-postgres-cluster"
+      service:
+        component: "postgresql"
+
+
+ +

+The example above includes ServiceRefs to an external Redis Sentinel service and a PostgreSQL Cluster. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to add. +These environment variables will be placed after the environment variables declared in the Pod. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the desired number of replicas in the Component for enhancing availability and durability, or load balancing. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +If defined, it will overwrite the scheduling policy defined in ClusterSpec. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resources required by the Component. +It allows defining the CPU, memory requirements and limits for the Component’s containers. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that represent the storage requirements for the Component. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for the Component. +

+ +
+ +`persistentVolumeClaimRetentionPolicy`
+ + +PersistentVolumeClaimRetentionPolicy + + + +
+ +(Optional) + +

+persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent +volume claims created from volumeClaimTemplates. By default, all persistent +volume claims are created as needed and retained until manually deleted. This +policy allows the lifecycle to be altered, for example by deleting persistent +volume claims when their workload is deleted, or when their pod is scaled +down. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+List of volumes to override. +

+ +
+ +`services`
+ + +[]ClusterComponentService + + + +
+ +(Optional) + +

+Overrides services defined in referenced ComponentDefinition. +

+ +
+ +`systemAccounts`
+ + +[]ComponentSystemAccount + + + +
+ +(Optional) + +

+Overrides system accounts defined in referenced ComponentDefinition. +

+ +
+ +`configs`
+ + +[]ClusterComponentConfig + + + +
+ +(Optional) + +

+Specifies the configuration content of a config template. +

+ +
+ +`tls`
+ +bool + + +
+ +(Optional) + +

+A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) +for secure communication. +When set to true, the Component will be configured to use TLS encryption for its network connections. +This ensures that the data transmitted between the Component and its clients or other Components is encrypted +and protected from unauthorized access. +If TLS is enabled, the Component may require additional configuration, such as specifying TLS certificates and keys, +to properly set up the secure communication channel. +

+ +
+ +`issuer`
+ + +Issuer + + + +
+ +(Optional) + +

+Specifies the configuration for the TLS certificates issuer. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +Required when TLS is enabled. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceAccount required by the running Component. +This ServiceAccount is used to grant necessary permissions for the Component’s Pods to interact +with other Kubernetes resources, such as modifying Pod labels or sending events. +

+ +

+If not specified, KubeBlocks automatically creates a default ServiceAccount named +“kb-{componentdefinition.name}”, bound to a role with rules defined in ComponentDefinition’s +`policyRules` field. If needed (currently this means if any lifecycleAction is enabled), +it will also be bound to a default role named +“kubeblocks-cluster-pod-role”, which is installed together with KubeBlocks. +If multiple components use the same ComponentDefinition, they will share one ServiceAccount. +

+ +

+If the field is not empty, the specified ServiceAccount will be used, and KubeBlocks will not +create a ServiceAccount. But KubeBlocks does create RoleBindings for the specified ServiceAccount. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default concurrency is 100%. +

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +`StrictInPlace` indicates that only in-place upgrades are allowed. +Any attempt to modify other fields will be rejected. +
  • +
  • +`PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod. +If that fails, it will fall back to ReCreate, where the pod will be recreated. +Default value is “PreferInPlace” +
  • +
+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Provides fine-grained control over the spec update process of all instances. +

+ +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Allows for the customization of configuration values for each instance within a Component. +An instance represent a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps). +While instances typically share a common configuration as defined in the ClusterComponentSpec, +they can require unique settings in various scenarios: +

+ +

+For example: +- A database Component might require different resource allocations for primary and secondary instances, + with primaries needing more resources. +- During a rolling upgrade, a Component may first update the image for one or a few instances, +and then update the remaining instances after verifying that the updated instances are functioning correctly. +

+ +

+InstanceTemplate allows for specifying these unique configurations per instance. +Each instance’s name is constructed using the pattern: $(component.name)-$(template.name)-$(ordinal), +starting with an ordinal of 0. +It is crucial to maintain unique names for each InstanceTemplate to avoid conflicts. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of replicas specified for the Component. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated Pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the Cluster. +

+ +
+ +`disableExporter`
+ +bool + + +
+ +(Optional) + +

+Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will not be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`stop`
+ +bool + + +
+ +(Optional) + +

+Stop the Component. +If set, all the computing resources will be released. +

+ +
+

+ClusterComponentStatus + +

+ +

+ +(Appears on:ClusterStatus) + +

+
+ +

+ClusterComponentStatus records Component status. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +ComponentPhase + + + +
+ + +

+Specifies the current state of the Component. +

+ +
+ +`message`
+ +map[string]string + + +
+ +(Optional) + +

+Records detailed information about the Component in its current phase. +The keys are either podName, deployName, or statefulSetName, formatted as ‘ObjectKind/Name’. +

+ +
+

+ClusterComponentVolumeClaimTemplate + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Refers to the name of a volumeMount defined in either: +

+
    +
  • +`componentDefinition.spec.runtime.containers[*].volumeMounts` +
  • +
  • +`clusterDefinition.spec.componentDefs[*].podSpec.containers[*].volumeMounts` (deprecated) +
  • +
+ +

+The value of `name` must match the `name` field of a volumeMount specified in the corresponding `volumeMounts` array. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies the labels for the PVC of the volume. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies the annotations for the PVC of the volume. +

+ +
+ +`spec`
+ + +PersistentVolumeClaimSpec + + + +
+ +(Optional) + +

+Defines the desired characteristics of a PersistentVolumeClaim that will be created for the volume +with the mount name specified in the `name` field. +

+ +

+When a Pod is created for this ClusterComponent, a new PVC will be created based on the specification +defined in the `spec` field. The PVC will be associated with the volume mount specified by the `name` field. +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`accessModes`
+ + +[]Kubernetes core/v1.PersistentVolumeAccessMode + + + +
+ +(Optional) + +

+Contains the desired access modes the volume should have. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.VolumeResourceRequirements + + + +
+ +(Optional) + +

+Represents the minimum resources the volume should have. +If the RecoverVolumeExpansionFailure feature is enabled, users are allowed to specify resource requirements that +are lower than the previous value but must still be higher than the capacity recorded in the status field of the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources. +

+ +
+ +`storageClassName`
+ +string + + +
+ +(Optional) + +

+The name of the StorageClass required by the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. +

+ +
+ +`volumeMode`
+ + +Kubernetes core/v1.PersistentVolumeMode + + + +
+ +(Optional) + +

+Defines what type of volume is required by the claim, either Block or Filesystem. +

+ +
+ +`volumeAttributesClassName`
+ +string + + +
+ +(Optional) + +

+volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. +

+ +

+More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass +

+ +
+ +
+

+ClusterDefinitionSpec + +

+ +

+ +(Appears on:ClusterDefinition) + +

+
+ +

+ClusterDefinitionSpec defines the desired state of ClusterDefinition. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`topologies`
+ + +[]ClusterTopology + + + +
+ +(Optional) + +

+Topologies defines all possible topologies within the cluster. +

+ +
+

+ClusterDefinitionStatus + +

+ +

+ +(Appears on:ClusterDefinition) + +

+
+ +

+ClusterDefinitionStatus defines the observed state of ClusterDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the most recent generation observed for this ClusterDefinition. +

+ +
+ +`phase`
+ + +Phase + + + +
+ + +

+Specifies the current phase of the ClusterDefinition. Valid values are `empty`, `Available`, `Unavailable`. +When `Available`, the ClusterDefinition is ready and can be referenced by related objects. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+ +`topologies`
+ +string + + +
+ +(Optional) + +

+Topologies this ClusterDefinition supported. +

+ +
+

+ClusterObjectReference + +

+ +

+ +(Appears on:ComponentVarSelector, CredentialVarSelector, HostNetworkVarSelector, ResourceVarSelector, ServiceRefVarSelector, ServiceVarSelector, TLSVarSelector) + +

+
+ +

+ClusterObjectReference defines information to let you locate the referenced object inside the same Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compDef`
+ +string + + +
+ +(Optional) + +

+Specifies the exact name, name prefix, or regular expression pattern for matching the name of the ComponentDefinition +custom resource (CR) used by the component that the referent object resident in. +

+ +

+If not specified, the component itself will be used. +

+ +
+ +`name`
+ +string + + +
+ +(Optional) + +

+Name of the referent object. +

+ +
+ +`optional`
+ +bool + + +
+ +(Optional) + +

+Specify whether the object must be defined. +

+ +
+ +`multipleClusterObjectOption`
+ + +MultipleClusterObjectOption + + + +
+ +(Optional) + +

+This option defines the behavior when multiple component objects match the specified @CompDef. +If not provided, an error will be raised when handling multiple matches. +

+ +
+

+ClusterPhase +(`string` alias) +

+ +

+ +(Appears on:ClusterStatus) + +

+
+ +

+ClusterPhase defines the phase of the Cluster within the .status.phase field. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Abnormal" +

+
+ +

+AbnormalClusterPhase represents some components are in `Failed` phase, indicates that the cluster is in +a fragile state and troubleshooting is required. +

+ +
+ +

+"Creating" +

+
+ +

+CreatingClusterPhase represents all components are in `Creating` phase. +

+ +
+ +

+"Deleting" +

+
+ +

+DeletingClusterPhase indicates the cluster is being deleted. +

+ +
+ +

+"Failed" +

+
+ +

+FailedClusterPhase represents all components are in `Failed` phase, indicates that the cluster is unavailable. +

+ +
+ +

+"Running" +

+
+ +

+RunningClusterPhase represents all components are in `Running` phase, indicates that the cluster is functioning properly. +

+ +
+ +

+"Stopped" +

+
+ +

+StoppedClusterPhase represents all components are in `Stopped` phase, indicates that the cluster has stopped and +is not providing any functionality. +

+ +
+ +

+"Stopping" +

+
+ +

+StoppingClusterPhase represents at least one component is in `Stopping` phase, indicates that the cluster is in +the process of stopping. +

+ +
+ +

+"Updating" +

+
+ +

+UpdatingClusterPhase represents all components are in `Creating`, `Running` or `Updating` phase, and at least one +component is in `Creating` or `Updating` phase, indicates that the cluster is undergoing an update. +

+ +
+

+ClusterService + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+ClusterService defines a service that is exposed externally, allowing entities outside the cluster to access it. +For example, external applications, or other Clusters. +And another Cluster managed by the same KubeBlocks operator can resolve the address exposed by a ClusterService +using the `serviceRef` field. +

+ +

+When a Component needs to access another Cluster’s ClusterService using the `serviceRef` field, +it must also define the service type and version information in the `componentDefinition.spec.serviceRefDeclarations` +section. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`Service`
+ + +Service + + + +
+ + +

+ +(Members of `Service` are embedded into this type.) + +

+ +
+ +`componentSelector`
+ +string + + +
+ +(Optional) + +

+Extends the ServiceSpec.Selector by allowing the specification of components, to be used as a selector for the service. +

+ +

+If the `componentSelector` is set as the name of a sharding, the service will be exposed to all components in the sharding. +

+ +
+

+ClusterSharding + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+ClusterSharding defines how KubeBlocks manages dynamically provisioned shards. +A typical design pattern for distributed databases is to distribute data across multiple shards, +with each shard consisting of multiple replicas. +Therefore, KubeBlocks supports representing a shard with a Component and dynamically instantiating Components +using a template when shards are added. +When shards are removed, the corresponding Components are also deleted. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Represents the common parent part of all shard names. +

+ +

+This identifier is included as part of the Service DNS name and must comply with IANA service naming rules. +It is used to generate the names of underlying Components following the pattern `$(clusterSharding.name)-$(ShardID)`. +ShardID is a random string that is appended to the Name to generate unique identifiers for each shard. +For example, if the sharding specification name is “my-shard” and the ShardID is “abc”, the resulting Component name +would be “my-shard-abc”. +

+ +

+Note that the name defined in Component template(`clusterSharding.template.name`) will be disregarded +when generating the Component names of the shards. The `clusterSharding.name` field takes precedence. +

+ +
+ +`shardingDef`
+ +string + + +
+ +(Optional) + +

+Specifies the ShardingDefinition custom resource (CR) that defines the sharding’s characteristics and behavior. +

+ +

+The full name or regular expression is supported to match the ShardingDefinition. +

+ +
+ +`template`
+ + +ClusterComponentSpec + + + +
+ + +

+The template for generating Components for shards, where each shard consists of one Component. +

+ +

+This field is of type ClusterComponentSpec, which encapsulates all the required details and +definitions for creating and managing the Components. +KubeBlocks uses this template to generate a set of identical Components of shards. +All the generated Components will have the same specifications and definitions as specified in the `template` field. +

+ +

+This allows for the creation of multiple Components with consistent configurations, +enabling sharding and distribution of workloads across Components. +

+ +
+ +`shards`
+ +int32 + + +
+ + +

+Specifies the desired number of shards. +

+ +

+Users can declare the desired number of shards through this field. +KubeBlocks dynamically creates and deletes Components based on the difference +between the desired and actual number of shards. +KubeBlocks provides lifecycle management for sharding, including: +

+
    +
  • +Executing the shardProvision Action defined in the ShardingDefinition when the number of shards increases. +This allows for custom actions to be performed after a new shard is provisioned. +
  • +
  • +Executing the shardTerminate Action defined in the ShardingDefinition when the number of shards decreases. +This enables custom cleanup or data migration tasks to be executed before a shard is terminated. +Resources and data associated with the corresponding Component will also be deleted. +
  • +
+ +
+

+ClusterSpec + +

+ +

+ +(Appears on:Cluster) + +

+
+ +

+ClusterSpec defines the desired state of Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterDef`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterDefinition to use when creating a Cluster. +

+ +

+This field enables users to create a Cluster based on a specific ClusterDefinition. +This, in conjunction with the `topology` field, determines: +

+
    +
  • +The Components to be included in the Cluster. +
  • +
  • +The sequences in which the Components are created, updated, and terminated. +
  • +
+ +

+This facilitates multiple-components management with predefined ClusterDefinition. +

+ +

+Users with advanced requirements can bypass this general setting and specify more precise control over +the composition of the Cluster by directly referencing specific ComponentDefinitions for each component +within `componentSpecs[*].componentDef`. +

+ +

+If this field is not provided, each component must be explicitly defined in `componentSpecs[*].componentDef`. +

+ +

+Note: Once set, this field cannot be modified; it is immutable. +

+ +
+ +`topology`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterTopology to be used when creating the Cluster. +

+ +

+This field defines which set of Components, as outlined in the ClusterDefinition, will be used to +construct the Cluster based on the named topology. +The ClusterDefinition may list multiple topologies under `clusterdefinition.spec.topologies[*]`, +each tailored to different use cases or environments. +

+ +

+If `topology` is not specified, the Cluster will use the default topology defined in the ClusterDefinition. +

+ +

+Note: Once set during the Cluster creation, the `topology` field cannot be modified. +It establishes the initial composition and structure of the Cluster and is intended for one-time configuration. +

+ +
+ +`terminationPolicy`
+ + +TerminationPolicyType + + + +
+ + +

+Specifies the behavior when a Cluster is deleted. +It defines how resources, data, and backups associated with a Cluster are managed during termination. +Choose a policy based on the desired level of resource cleanup and data preservation: +

+
    +
  • +`DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. +
  • +
  • +`Delete`: Deletes all runtime resources belonging to the Cluster. +
  • +
  • +`WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and +backups in external storage. +This results in complete data removal and should be used cautiously, primarily in non-production environments +to avoid irreversible data loss. +
  • +
+ +

+Warning: Choosing an inappropriate termination policy can result in data loss. +The `WipeOut` policy is particularly risky in production environments due to its irreversible nature. +

+ +
+ +`componentSpecs`
+ + +[]ClusterComponentSpec + + + +
+ +(Optional) + +

+Specifies a list of ClusterComponentSpec objects used to define the individual Components that make up a Cluster. +This field allows for detailed configuration of each Component within the Cluster. +

+ +

+Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`shardings`
+ + +[]ClusterSharding + + + +
+ +(Optional) + +

+Specifies a list of ClusterSharding objects that manage the sharding topology for Cluster Components. +Each ClusterSharding organizes components into shards, with each shard corresponding to a Component. +Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations. +

+ +

+This field supports dynamic resharding by facilitating the addition or removal of shards +through the `shards` field in ClusterSharding. +

+ +

+Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Specifies runtimeClassName for all Pods managed by this Cluster. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Cluster. +

+ +
+ +`services`
+ + +[]ClusterService + + + +
+ +(Optional) + +

+Defines a list of additional Services that are exposed by a Cluster. +This field allows Services of selected Components, either from `componentSpecs` or `shardings` to be exposed, +alongside Services defined with ComponentService. +

+ +

+Services defined here can be referenced by other clusters using the ServiceRefClusterSelector. +

+ +
+ +`backup`
+ + +ClusterBackup + + + +
+ +(Optional) + +

+Specifies the backup configuration of the Cluster. +

+ +
+

+ClusterStatus + +

+ +

+ +(Appears on:Cluster) + +

+
+ +

+ClusterStatus defines the observed state of the Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+The most recent generation number of the Cluster object that has been observed by the controller. +

+ +
+ +`phase`
+ + +ClusterPhase + + + +
+ +(Optional) + +

+The current phase of the Cluster includes: +`Creating`, `Running`, `Updating`, `Stopping`, `Stopped`, `Deleting`, `Failed`, `Abnormal`. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+ +`components`
+ + +map[string]github.com/apecloud/kubeblocks/apis/apps/v1.ClusterComponentStatus + + + +
+ +(Optional) + +

+Records the current status information of all Components within the Cluster. +

+ +
+ +`shardings`
+ + +map[string]github.com/apecloud/kubeblocks/apis/apps/v1.ClusterComponentStatus + + + +
+ +(Optional) + +

+Records the current status information of all shardings within the Cluster. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents a list of detailed status of the Cluster object. +Each condition in the list provides real-time information about certain aspects of the Cluster object. +

+ +

+This field is crucial for administrators and developers to monitor and respond to changes within the Cluster. +It provides a history of state transitions and a snapshot of the current state that can be used for +automated logic or direct inspection. +

+ +
+

+ClusterTopology + +

+ +

+ +(Appears on:ClusterDefinitionSpec) + +

+
+ +

+ClusterTopology represents the definition for a specific cluster topology. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name is the unique identifier for the cluster topology. +Cannot be updated. +

+ +
+ +`components`
+ + +[]ClusterTopologyComponent + + + +
+ +(Optional) + +

+Components specifies the components in the topology. +

+ +
+ +`shardings`
+ + +[]ClusterTopologySharding + + + +
+ +(Optional) + +

+Shardings specifies the shardings in the topology. +

+ +
+ +`orders`
+ + +ClusterTopologyOrders + + + +
+ +(Optional) + +

+Specifies the sequence in which components within a cluster topology are +started, stopped, and upgraded. +This ordering is crucial for maintaining the correct dependencies and operational flow across components. +

+ +
+ +`default`
+ +bool + + +
+ +(Optional) + +

+Default indicates whether this topology serves as the default configuration. +When set to true, this topology is automatically used unless another is explicitly specified. +

+ +
+

+ClusterTopologyComponent + +

+ +

+ +(Appears on:ClusterTopology) + +

+
+ +

+ClusterTopologyComponent defines a Component within a ClusterTopology. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the unique identifier of the component within the cluster topology. +

+ +

+It follows IANA Service naming rules and is used as part of the Service’s DNS name. +The name must start with a lowercase letter, can contain lowercase letters, numbers, +and hyphens, and must end with a lowercase letter or number. +

+ +

+If the @template field is set to true, the name will be used as a prefix to match the specific components dynamically created. +

+ +

+Cannot be updated once set. +

+ +
+ +`compDef`
+ +string + + +
+ + +

+Specifies the exact name, name prefix, or regular expression pattern for matching the name of the ComponentDefinition +custom resource (CR) that defines the Component’s characteristics and behavior. +

+ +

+The system selects the ComponentDefinition CR with the latest version that matches the pattern. +This approach allows: +

+
    +
  1. +Precise selection by providing the exact name of a ComponentDefinition CR. +
  2. +
  3. +Flexible and automatic selection of the most up-to-date ComponentDefinition CR +by specifying a name prefix or regular expression pattern. +
  4. +
+ +

+Cannot be updated once set. +

+ +
+ +`template`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the topology component will be considered as a template for instantiating components upon user requests dynamically. +

+ +

+Cannot be updated once set. +

+ +
+

+ClusterTopologyOrders + +

+ +

+ +(Appears on:ClusterTopology) + +

+
+ +

+ClusterTopologyOrders manages the lifecycle of components within a cluster by defining their provisioning, +terminating, and updating sequences. +It organizes components into stages or groups, where each group indicates a set of components +that can be managed concurrently. +These groups are processed sequentially, allowing precise control based on component dependencies and requirements. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`provision`
+ +[]string + + +
+ +(Optional) + +

+Specifies the order for creating and initializing entities. +This is designed for entities that depend on one another. Entities without dependencies can be grouped together. +

+ +

+Entities that can be provisioned independently or have no dependencies can be listed together in the same stage, +separated by commas. +

+ +
+ +`terminate`
+ +[]string + + +
+ +(Optional) + +

+Outlines the order for stopping and deleting entities. +This sequence is designed for entities that require a graceful shutdown or have interdependencies. +

+ +

+Entities that can be terminated independently or have no dependencies can be listed together in the same stage, +separated by commas. +

+ +
+ +`update`
+ +[]string + + +
+ +(Optional) + +

+Update determines the order for updating entities’ specifications, such as image upgrades or resource scaling. +This sequence is designed for entities that have dependencies or require specific update procedures. +

+ +

+Entities that can be updated independently or have no dependencies can be listed together in the same stage, +separated by commas. +

+ +
+

+ClusterTopologySharding + +

+ +

+ +(Appears on:ClusterTopology) + +

+
+ +

+ClusterTopologySharding defines a sharding within a ClusterTopology. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the unique identifier of the sharding within the cluster topology. +It follows IANA Service naming rules and is used as part of the Service’s DNS name. +The name must start with a lowercase letter, can contain lowercase letters, numbers, +and hyphens, and must end with a lowercase letter or number. +

+ +

+Cannot be updated once set. +

+ +
+ +`shardingDef`
+ +string + + +
+ + +

+Specifies the sharding definition that defines the characteristics and behavior of the sharding. +

+ +

+The system selects the ShardingDefinition CR with the latest version that matches the pattern. +This approach allows: +

+
    +
  1. +Precise selection by providing the exact name of a ShardingDefinition CR. +
  2. +
  3. +Flexible and automatic selection of the most up-to-date ShardingDefinition CR +by specifying a regular expression pattern. +
  4. +
+ +

+Once set, this field cannot be updated. +

+ +
+

+ClusterVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ClusterVarSelector selects a var from a Cluster. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterVars`
+ + +ClusterVars + + + +
+ + +

+ +(Members of `ClusterVars` are embedded into this type.) + +

+ +
+

+ClusterVars + +

+ +

+ +(Appears on:ClusterVarSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`namespace`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the namespace of the Cluster object. +

+ +
+ +`clusterName`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the name of the Cluster object. +

+ +
+ +`clusterUID`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the UID of the Cluster object. +

+ +
+

+ComponentAvailable + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ComponentAvailable defines the strategies for determining whether the component is available. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`withPhases`
+ +string + + +
+ +(Optional) + +

+Specifies the phases that the component will go through to be considered available. +

+ +

+This field is immutable once set. +

+ +
+ +`withProbe`
+ + +ComponentAvailableWithProbe + + + +
+ +(Optional) + +

+Specifies the strategies for determining whether the component is available based on the available probe. +

+ +

+This field is immutable once set. +

+ +
+

+ComponentAvailableCondition + +

+ +

+ +(Appears on:ComponentAvailableWithProbe) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentAvailableExpression`
+ + +ComponentAvailableExpression + + + +
+ + +

+ +(Members of `ComponentAvailableExpression` are embedded into this type.) + +

+ +
+ +`and`
+ + +[]ComponentAvailableExpression + + + +
+ +(Optional) + +

+Logical And to combine multiple expressions. +

+ +

+This field is immutable once set. +

+ +
+ +`or`
+ + +[]ComponentAvailableExpression + + + +
+ +(Optional) + +

+Logical Or to combine multiple expressions. +

+ +

+This field is immutable once set. +

+ +
+ +`not`
+ + +ComponentAvailableExpression + + + +
+ +(Optional) + +

+Logical Not to negate the expression. +

+ +

+This field is immutable once set. +

+ +
+

+ComponentAvailableExpression + +

+ +

+ +(Appears on:ComponentAvailableCondition) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`all`
+ + +ComponentAvailableProbeAssertion + + + +
+ +(Optional) + +

+All replicas must satisfy the assertion. +

+ +

+This field is immutable once set. +

+ +
+ +`any`
+ + +ComponentAvailableProbeAssertion + + + +
+ +(Optional) + +

+At least one replica must satisfy the assertion. +

+ +

+This field is immutable once set. +

+ +
+ +`none`
+ + +ComponentAvailableProbeAssertion + + + +
+ +(Optional) + +

+None of the replicas must satisfy the assertion. +

+ +

+This field is immutable once set. +

+ +
+ +`majority`
+ + +ComponentAvailableProbeAssertion + + + +
+ +(Optional) + +

+Majority replicas must satisfy the assertion. +

+ +

+This field is immutable once set. +

+ +
+

+ComponentAvailableProbeAssertion + +

+ +

+ +(Appears on:ComponentAvailableExpression) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ActionAssertion`
+ + +ActionAssertion + + + +
+ + +

+ +(Members of `ActionAssertion` are embedded into this type.) + +

+ +
+ +`and`
+ + +[]ActionAssertion + + + +
+ +(Optional) + +

+Logical And to combine multiple assertions. +

+ +

+This field is immutable once set. +

+ +
+ +`or`
+ + +[]ActionAssertion + + + +
+ +(Optional) + +

+Logical Or to combine multiple assertions. +

+ +

+This field is immutable once set. +

+ +
+ +`not`
+ + +ActionAssertion + + + +
+ +(Optional) + +

+Logical Not to negate the assertions. +

+ +

+This field is immutable once set. +

+ +
+ +`strict`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to apply the assertions strictly to all replicas. +

+ +

+This field is immutable once set. +

+ +
+

+ComponentAvailableWithProbe + +

+ +

+ +(Appears on:ComponentAvailable) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`timeWindowSeconds`
+ +int32 + + +
+ +(Optional) + +

+This field is immutable once set. +

+ +
+ +`condition`
+ + +ComponentAvailableCondition + + + +
+ +(Optional) + +

+Specifies the conditions that the component will go through to be considered available. +

+ +

+This field is immutable once set. +

+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+A brief description for the condition when the component is available. +

+ +
+

+ComponentDefinitionSpec + +

+ +

+ +(Appears on:ComponentDefinition) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`provider`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Component provider, typically the vendor or developer name. +It identifies the entity responsible for creating and maintaining the Component. +

+ +

+When specifying the provider name, consider the following guidelines: +

+
    +
  • +Keep the name concise and relevant to the Component. +
  • +
  • +Use a consistent naming convention across Components from the same provider. +
  • +
  • +Avoid using trademarked or copyrighted names without proper permission. +
  • +
+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Provides a brief and concise explanation of the Component’s purpose, functionality, and any relevant details. +It serves as a quick reference for users to understand the Component’s role and characteristics. +

+ +
+ +`serviceKind`
+ +string + + +
+ +(Optional) + +

+Defines the type of well-known service protocol that the Component provides. +It specifies the standard or widely recognized protocol used by the Component to offer its Services. +

+ +

+The `serviceKind` field allows users to quickly identify the type of Service provided by the Component +based on common protocols or service types. This information helps in understanding the compatibility, +interoperability, and usage of the Component within a system. +

+ +

+Some examples of well-known service protocols include: +

+
    +
  • +“MySQL”: Indicates that the Component provides a MySQL database service. +
  • +
  • +“PostgreSQL”: Indicates that the Component offers a PostgreSQL database service. +
  • +
  • +“Redis”: Signifies that the Component functions as a Redis key-value store. +
  • +
  • +“ETCD”: Denotes that the Component serves as an ETCD distributed key-value store. +
  • +
+ +

+The `serviceKind` value is case-insensitive, allowing for flexibility in specifying the protocol name. +

+ +

+When specifying the `serviceKind`, consider the following guidelines: +

+
    +
  • +Use well-established and widely recognized protocol names or service types. +
  • +
  • +Ensure that the `serviceKind` accurately represents the primary service type offered by the Component. +
  • +
  • +If the Component provides multiple services, choose the most prominent or commonly used protocol. +
  • +
  • +Limit the `serviceKind` to a maximum of 32 characters for conciseness and readability. +
  • +
+ +

+Note: The `serviceKind` field is optional and can be left empty if the Component does not fit into a well-known +service category or if the protocol is not widely recognized. It is primarily used to convey information about +the Component’s service type to users and facilitate discovery and integration. +

+ +

+The `serviceKind` field is immutable and cannot be updated. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+Specifies the version of the Service provided by the Component. +It follows the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +

+The Semantic Versioning specification defines a version number format of X.Y.Z (MAJOR.MINOR.PATCH), where: +

+
    +
  • +X represents the major version and indicates incompatible API changes. +
  • +
  • +Y represents the minor version and indicates added functionality in a backward-compatible manner. +
  • +
  • +Z represents the patch version and indicates backward-compatible bug fixes. +
  • +
+ +

+Additional labels for pre-release and build metadata are available as extensions to the X.Y.Z format: +

+
    +
  • +Use pre-release labels (e.g., -alpha, -beta) for versions that are not yet stable or ready for production use. +
  • +
  • +Use build metadata (e.g., +build.1) for additional version information if needed. +
  • +
+ +

+Examples of valid ServiceVersion values: +

+
    +
  • +“1.0.0” +
  • +
  • +“2.3.1” +
  • +
  • +“3.0.0-alpha.1” +
  • +
  • +“4.5.2+build.1” +
  • +
+ +

+The `serviceVersion` field is immutable and cannot be updated. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static labels that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If a label key in the `labels` field conflicts with any system labels or user-specified labels, +it will be silently ignored to avoid overriding higher-priority labels. +

+ +

+This field is immutable. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static annotations that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If an annotation key in the `annotations` field conflicts with any system annotations +or user-specified annotations, it will be silently ignored to avoid overriding higher-priority annotations. +

+ +

+This field is immutable. +

+ +
+ +`runtime`
+ + +Kubernetes core/v1.PodSpec + + + +
+ + +

+Specifies the PodSpec template used in the Component. +It includes the following elements: +

+
    +
  • +Init containers +
  • +
  • +Containers +
      +
    • +Image +
    • +
    • +Commands +
    • +
    • +Args +
    • +
    • +Envs +
    • +
    • +Mounts +
    • +
    • +Ports +
    • +
    • +Security context +
    • +
    • +Probes +
    • +
    • +Lifecycle +
    • +
    +
  • +
  • +Volumes +
  • +
+ +

+This field is intended to define static settings that remain consistent across all instantiated Components. +Dynamic settings such as CPU and memory resource limits, as well as scheduling settings (affinity, +toleration, priority), may vary among different instantiated Components. +They should be specified in the `cluster.spec.componentSpecs` (ClusterComponentSpec). +

+ +

+Specific instances of a Component may override settings defined here, such as using a different container image +or modifying environment variable values. +These instance-specific overrides can be specified in `cluster.spec.componentSpecs[*].instances`. +

+ +

+This field is immutable and cannot be updated once set. +

+ +
+ +`vars`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Defines variables which are determined after Cluster instantiation and reflect +dynamic or runtime attributes of instantiated Clusters. +These variables serve as placeholders for setting environment variables in Pods and Actions, +or for rendering configuration and script templates before actual values are finalized. +

+ +

+These variables are placed in front of the environment variables declared in the Pod if used as +environment variables. +

+ +

+Variable values can be sourced from: +

+
    +
  • +ConfigMap: Select and extract a value from a specific key within a ConfigMap. +
  • +
  • +Secret: Select and extract a value from a specific key within a Secret. +
  • +
  • +HostNetwork: Retrieves values (including ports) from host-network resources. +
  • +
  • +Service: Retrieves values (including address, port, NodePort) from a selected Service. +Intended to obtain the address of a ComponentService within the same Cluster. +
  • +
  • +Credential: Retrieves account name and password from a SystemAccount variable. +
  • +
  • +ServiceRef: Retrieves address, port, account name and password from a selected ServiceRefDeclaration. +Designed to obtain the address bound to a ServiceRef, such as a ClusterService or +ComponentService of another cluster or an external service. +
  • +
  • +Component: Retrieves values from a selected Component, including replicas and instance name list. +
  • +
+ +

+This field is immutable. +

+ +
+ +`volumes`
+ + +[]ComponentVolume + + + +
+ +(Optional) + +

+Defines the volumes used by the Component and some static attributes of the volumes. +After defining the volumes here, user can reference them in the +`cluster.spec.componentSpecs[*].volumeClaimTemplates` field to configure dynamic properties such as +volume capacity and storage class. +

+ +

+This field allows you to specify the following: +

+
    +
  • +Snapshot behavior: Determines whether a snapshot of the volume should be taken when performing +a snapshot backup of the Component. +
  • +
  • +Disk high watermark: Sets the high watermark for the volume’s disk usage. +When the disk usage reaches the specified threshold, it triggers an alert or action. +
  • +
+ +

+By configuring these volume behaviors, you can control how the volumes are managed and monitored within the Component. +

+ +

+This field is immutable. +

+ +
+ +`hostNetwork`
+ + +HostNetwork + + + +
+ +(Optional) + +

+Specifies the host network configuration for the Component. +

+ +

+When `hostNetwork` option is enabled, the Pods share the host’s network namespace and can directly access +the host’s network interfaces. +This means that if multiple Pods need to use the same port, they cannot run on the same host simultaneously +due to port conflicts. +

+ +

+The DNSPolicy field in the Pod spec determines how containers within the Pod perform DNS resolution. +When using hostNetwork, the operator will set the DNSPolicy to ‘ClusterFirstWithHostNet’. +With this policy, DNS queries will first go through the K8s cluster’s DNS service. +If the query fails, it will fall back to the host’s DNS settings. +

+ +

+If set, the DNS policy will be automatically set to “ClusterFirstWithHostNet”. +

+ +

+This field is immutable. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Defines additional Services to expose the Component’s endpoints. +

+ +

+A default headless Service, named `{cluster.name}-{component.name}-headless`, is automatically created +for internal Cluster communication. +

+ +

+This field enables customization of additional Services to expose the Component’s endpoints to +other Components within the same or different Clusters, and to external applications. +Each Service entry in this list can include properties such as ports, type, and selectors. +

+
    +
  • +For intra-Cluster access, Components can reference Services using variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceVarRef`. +
  • +
  • +For inter-Cluster access, reference Services use variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceRefVarRef`, +and bind Services at Cluster creation time with `clusterComponentSpec.ServiceRef[*].clusterServiceSelector`. +
  • +
+ +

+This field is immutable. +

+ +
+ +`configs`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies the config file templates and volume mount parameters used by the Component. +

+ +

+This field specifies a list of templates that will be rendered into Component containers’ config files. +Each template is represented as a ConfigMap and may contain multiple config files, with each file being a key in the ConfigMap. +

+ +

+This field is immutable. +

+ +
+ +`scripts`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies groups of scripts, each provided via a ConfigMap, to be mounted as volumes in the container. +These scripts can be executed during container startup or via specific actions. +

+ +

+This field is immutable. +

+ +
+ +`logConfigs`
+ + +[]LogConfig + + + +
+ +(Optional) + +

+Defines the types of logs generated by instances of the Component and their corresponding file paths. +These logs can be collected for further analysis and monitoring. +

+ +

+The `logConfigs` field is an optional list of LogConfig objects, where each object represents +a specific log type and its configuration. +It allows you to specify multiple log types and their respective file paths for the Component. +

+ +

+Examples: +

+
+
+ logConfigs:
+ - filePathPattern: /data/mysql/log/mysqld-error.log
+   name: error
+ - filePathPattern: /data/mysql/log/mysqld.log
+   name: general
+ - filePathPattern: /data/mysql/log/mysqld-slowquery.log
+   name: slow
+
+
+ +

+This field is immutable. +

+ +
+ +`systemAccounts`
+ + +[]SystemAccount + + + +
+ +(Optional) + +

+An array of `SystemAccount` objects that define the system accounts needed +for the management operations of the Component. +

+ +

+Each `SystemAccount` includes: +

+
    +
  • +Account name. +
  • +
  • +The SQL statement template: Used to create the system account. +
  • +
  • +Password Source: Either generated based on certain rules or retrieved from a Secret. +
  • +
+ +

+Use cases for system accounts typically involve tasks like system initialization, backups, monitoring, +health checks, replication, and other system-level operations. +

+ +

+System accounts are distinct from user accounts, although both are database accounts. +

+
    +
  • +System Accounts: Created during Cluster setup by the KubeBlocks operator, +these accounts have higher privileges for system management and are fully managed +through a declarative API by the operator. +
  • +
  • +User Accounts: Managed by users or administrator. +User account permissions should follow the principle of least privilege, +granting only the necessary access rights to complete their required tasks. +
  • +
+ +

+This field is immutable. +

+ +
+ +`tls`
+ + +TLS + + + +
+ +(Optional) + +

+Specifies the TLS configuration for the Component. +

+ +

+This field is immutable. +

+ +
+ +`replicasLimit`
+ + +ReplicasLimit + + + +
+ +(Optional) + +

+Defines the upper limit of the number of replicas supported by the Component. +

+ +

+It defines the maximum number of replicas that can be created for the Component. +This field allows you to set a limit on the scalability of the Component, preventing it from exceeding a certain number of replicas. +

+ +

+This field is immutable. +

+ +
+ +`available`
+ + +ComponentAvailable + + + +
+ +(Optional) + +

+Specifies the strategies for determining the available status of the Component. +

+ +

+This field is immutable. +

+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+Enumerate all possible roles assigned to each replica of the Component, influencing its behavior. +

+ +

+A replica can have zero or one role. +KubeBlocks operator determines the role of each replica by invoking the `lifecycleActions.roleProbe` method. +This action returns the role for each replica, and the returned role must be predefined here. +

+ +

+The roles assigned to a replica can influence various aspects of the Component’s behavior, such as: +

+
    +
  • +Service selection: The Component’s exposed Services may target replicas based on their roles using `roleSelector`. +
  • +
  • +Update order: The roles can determine the order in which replicas are updated during a Component update. +For instance, replicas with a “follower” role can be updated first, while the replica with the “leader” +role is updated last. This helps minimize the number of leader changes during the update process. +
  • +
+ +

+This field is immutable. +

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+`minReadySeconds` is the minimum duration in seconds that a new Pod should remain in the ready +state without any of its containers crashing to be considered available. +This ensures the Pod’s stability and readiness to serve requests. +

+ +

+A default value of 0 seconds means the Pod is considered available as soon as it enters the ready state. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the concurrency strategy for updating multiple instances of the Component. +Available strategies: +

+
    +
  • +`Serial`: Updates replicas one at a time, ensuring minimal downtime by waiting for each replica to become ready +before updating the next. +
  • +
  • +`Parallel`: Updates all replicas simultaneously, optimizing for speed but potentially reducing availability +during the update. +
  • +
  • +`BestEffortParallel`: Updates replicas concurrently with a limit on simultaneous updates to ensure a minimum +number of operational replicas for maintaining quorum. + For example, in a 5-replica component, updating a maximum of 2 replicas simultaneously keeps +at least 3 operational for quorum. +
  • +
+ +

+This field is immutable and defaults to ‘Serial’. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+InstanceSet controls the creation of pods during initial scale up, replacement of pods on nodes, and scaling down. +

+
    +
  • +`OrderedReady`: Creates pods in increasing order (pod-0, then pod-1, etc). The controller waits until each pod +is ready before continuing. Pods are removed in reverse order when scaling down. +
  • +
  • +`Parallel`: Creates pods in parallel to match the desired scale without waiting. All pods are deleted at once +when scaling down. +
  • +
+ +
+ +`policyRules`
+ + +[]Kubernetes rbac/v1.PolicyRule + + + +
+ +(Optional) + +

+Defines the namespaced policy rules required by the Component. +

+ +

+The `policyRules` field is an array of `rbacv1.PolicyRule` objects that define the policy rules +needed by the Component to operate within a namespace. +These policy rules determine the permissions and verbs the Component is allowed to perform on +Kubernetes resources within the namespace. +

+ +

+The purpose of this field is to automatically generate the necessary RBAC roles +for the Component based on the specified policy rules. +This ensures that the Pods in the Component have appropriate permissions to function. +

+ +

+To prevent privilege escalation, only permissions already owned by KubeBlocks can be added here. +

+ +

+This field is immutable. +

+ +
+ +`lifecycleActions`
+ + +ComponentLifecycleActions + + + +
+ +(Optional) + +

+Defines a set of hooks and procedures that customize the behavior of a Component throughout its lifecycle. +Actions are triggered at specific lifecycle stages: +

+
    +
  • +`postProvision`: Defines the hook to be executed after the creation of a Component, +with `preCondition` specifying when the action should be fired relative to the Component’s lifecycle stages: +`Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`. +
  • +
  • +`preTerminate`: Defines the hook to be executed before terminating a Component. +
  • +
  • +`roleProbe`: Defines the procedure which is invoked regularly to assess the role of replicas. +
  • +
  • +`switchover`: Defines the procedure for a controlled transition of a role to a new replica. +This approach aims to minimize downtime and maintain availability in systems with a leader-follower topology, +such as before planned maintenance or upgrades on the current leader node. +
  • +
  • +`memberJoin`: Defines the procedure to add a new replica to the replication group. +
  • +
  • +`memberLeave`: Defines the method to remove a replica from the replication group. +
  • +
  • +`readOnly`: Defines the procedure to switch a replica into the read-only state. +
  • +
  • +`readWrite`: Defines the procedure to transition a replica from the read-only state back to the read-write state. +
  • +
  • +`dataDump`: Defines the procedure to export the data from a replica. +
  • +
  • +`dataLoad`: Defines the procedure to import data into a replica. +
  • +
  • +`reconfigure`: Defines the procedure that updates a replica with a new configuration file. +
  • +
  • +`accountProvision`: Defines the procedure to generate a new database account. +
  • +
+ +

+This field is immutable. +

+ +
+ +`serviceRefDeclarations`
+ + +[]ServiceRefDeclaration + + + +
+ +(Optional) + +

+Lists external service dependencies of the Component, including services from other Clusters or outside the K8s environment. +

+ +

+This field is immutable. +

+ +
+ +`exporter`
+ + +Exporter + + + +
+ +(Optional) + +

+Defines the built-in metrics exporter container. +

+ +
+

+ComponentDefinitionStatus + +

+ +

+ +(Appears on:ComponentDefinition) + +

+
+ +

+ComponentDefinitionStatus defines the observed state of ComponentDefinition. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Refers to the most recent generation that has been observed for the ComponentDefinition. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Represents the current status of the ComponentDefinition. Valid values include empty, `Available`, and `Unavailable`. +When the status is `Available`, the ComponentDefinition is ready and can be utilized by related objects. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+

+ComponentFileTemplate + +

+ +

+ +(Appears on:ComponentDefinitionSpec, SidecarDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the template. +

+ +
+ +`template`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the referenced template ConfigMap object. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the referenced template ConfigMap object. +

+ +
+ +`volumeName`
+ +string + + +
+ +(Optional) + +

+Refers to the volume name of PodTemplate. The file produced through the template will be mounted to +the corresponding volume. Must be a DNS_LABEL name. +The volume name must be defined in podSpec.containers[*].volumeMounts. +

+ +
+ +`defaultMode`
+ +int32 + + +
+ +(Optional) + +

+The operator attempts to set default file permissions (0444). +

+ +

+Must be specified as an octal value between 0000 and 0777 (inclusive), +or as a decimal value between 0 and 511 (inclusive). +YAML supports both octal and decimal values for file permissions. +

+ +

+Please note that this setting only affects the permissions of the files themselves. +Directories within the specified path are not impacted by this setting. +It’s important to be aware that this setting might conflict with other options +that influence the file mode, such as fsGroup. +In such cases, the resulting file mode may have additional bits set. +Refers to documents of k8s.ConfigMapVolumeSource.defaultMode for more information. +

+ +
+ +`externalManaged`
+ +bool + + +
+ +(Optional) + +

+ExternalManaged indicates whether the configuration is managed by an external system. +When set to true, the controller will ignore the management of this configuration. +

+ +
+ +`restartOnFileChange`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to restart the pod when the file changes. +

+ +
+

+ComponentLifecycleActions + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ComponentLifecycleActions defines a collection of Actions for customizing the behavior of a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`postProvision`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the hook to be executed after a component’s creation. +

+ +

+By setting `postProvision.customHandler.preCondition`, you can determine the specific lifecycle stage +at which the action should trigger: `Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`, +with `ComponentReady` being the default. +

+ +

+The PostProvision Action is intended to run only once. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`preTerminate`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the hook to be executed prior to terminating a component. +

+ +

+The PreTerminate Action is intended to run only once. +

+ +

+This action is executed immediately when a scale-down operation for the Component is initiated. +The actual termination and cleanup of the Component and its associated resources will not proceed +until the PreTerminate action has completed successfully. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`roleProbe`
+ + +Probe + + + +
+ +(Optional) + +

+Defines the procedure which is invoked regularly to assess the role of replicas. +

+ +

+This action is periodically triggered at the specified interval to determine the role of each replica. +Upon successful execution, the action’s output designates the role of the replica, +which should match one of the predefined role names within `componentDefinition.spec.roles`. +The output is then compared with the previous successful execution result. +If a role change is detected, an event is generated to inform the controller, +which initiates an update of the replica’s role. +

+ +

+Defining a RoleProbe Action for a Component is required if roles are defined for the Component. +It ensures replicas are correctly labeled with their respective roles. +Without this, services that rely on roleSelectors might improperly direct traffic to wrong replicas. +

+ +

+The container executing this action has access to following variables: +

+
    +
  • +KB_POD_FQDN: The FQDN of the Pod whose role is being assessed. +
  • +
+ +

+Expected output of this action: +- On Success: The determined role of the replica, which must align with one of the roles specified + in the component definition. +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`availableProbe`
+ + +Probe + + + +
+ +(Optional) + +

+Defines the procedure which is invoked regularly to assess the availability of the component. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`switchover`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure for a controlled transition of a role to a new replica. +This approach aims to minimize downtime and maintain availability +during events such as planned maintenance or when performing stop, shutdown, restart, or upgrade operations. +In a typical consensus system, this action is used to transfer leader role to another replica. +

+ +

+The container executing this action has access to following variables: +

+
    +
  • +KB_SWITCHOVER_CANDIDATE_NAME: The name of the pod of the new role’s candidate, which may not be specified (empty). +
  • +
  • +KB_SWITCHOVER_CANDIDATE_FQDN: The FQDN of the pod of the new role’s candidate, which may not be specified (empty). +
  • +
  • +KB_SWITCHOVER_CURRENT_NAME: The name of the pod of the current role. +
  • +
  • +KB_SWITCHOVER_CURRENT_FQDN: The FQDN of the pod of the current role. +
  • +
  • +KB_SWITCHOVER_ROLE: The role that will be transferred to another replica. +This variable can be empty if, for example, role probe does not succeed. +It depends on the addon implementation what to do under such cases. +
  • +
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`memberJoin`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure to add a new replica to the replication group. +

+ +

+This action is initiated after a replica pod becomes ready. +

+ +

+The role of the replica (e.g., primary, secondary) will be determined and assigned as part of the action command +implementation, or automatically by the database kernel or a sidecar utility like Patroni that implements +a consensus algorithm. +

+ +

+The container executing this action has access to following variables: +

+
    +
  • +KB_JOIN_MEMBER_POD_FQDN: The pod FQDN of the replica being added to the group. +
  • +
  • +KB_JOIN_MEMBER_POD_NAME: The pod name of the replica being added to the group. +
  • +
+ +

+Expected action output: +- On Failure: An error message detailing the reason for any failure encountered +during the addition of the new member. +

+ +

+For example, to add a new OBServer to an OceanBase Cluster in ‘zone1’, the following command may be used: +

+
+
+command:
+- bash
+- -c
+- |
+   CLIENT="mysql -u $SERVICE_USER -p$SERVICE_PASSWORD -P $SERVICE_PORT -h $SERVICE_HOST -e"
+	  $CLIENT "ALTER SYSTEM ADD SERVER '$KB_POD_FQDN:$SERVICE_PORT' ZONE 'zone1'"
+
+
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`memberLeave`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure to remove a replica from the replication group. +

+ +

+This action is initiated before removing a replica from the group. +The operator will wait for MemberLeave to complete successfully before releasing the replica and cleaning up +related Kubernetes resources. +

+ +

+The process typically includes updating configurations and informing other group members about the removal. +Data migration is generally not part of this action and should be handled separately if needed. +

+ +

+The container executing this action has access to following variables: +

+
    +
  • +KB_LEAVE_MEMBER_POD_FQDN: The pod FQDN of the replica being removed from the group. +
  • +
  • +KB_LEAVE_MEMBER_POD_NAME: The pod name of the replica being removed from the group. +
  • +
+ +

+Expected action output: +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+For example, to remove an OBServer from an OceanBase Cluster in ‘zone1’, the following command can be executed: +

+
+
+command:
+- bash
+- -c
+- |
+   CLIENT="mysql -u $SERVICE_USER -p$SERVICE_PASSWORD -P $SERVICE_PORT -h $SERVICE_HOST -e"
+	  $CLIENT "ALTER SYSTEM DELETE SERVER '$KB_POD_FQDN:$SERVICE_PORT' ZONE 'zone1'"
+
+
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`readonly`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure to switch a replica into the read-only state. +

+ +

+Use Case: +This action is invoked when the database’s volume capacity nears its upper limit and space is about to be exhausted. +

+ +

+The container executing this action has access to following environment variables: +

+
    +
  • +KB_POD_FQDN: The FQDN of the replica pod whose role is being checked. +
  • +
+ +

+Expected action output: +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`readwrite`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure to transition a replica from the read-only state back to the read-write state. +

+ +

+Use Case: +This action is used to bring back a replica that was previously in a read-only state, +which restricted write operations, to its normal operational state where it can handle +both read and write operations. +

+ +

+The container executing this action has access to following environment variables: +

+
    +
  • +KB_POD_FQDN: The FQDN of the replica pod whose role is being checked. +
  • +
+ +

+Expected action output: +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`dataDump`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure for exporting the data from a replica. +

+ +

+Use Case: +This action is intended for initializing a newly created replica with data. It involves exporting data +from an existing replica and importing it into the new, empty replica. This is essential for synchronizing +the state of replicas across the system. +

+ +

+Applicability: +Some database engines or associated sidecar applications (e.g., Patroni) may already provide this functionality. +In such cases, this action may not be required. +

+ +

+The output should be a valid data dump streamed to stdout. It must exclude any irrelevant information to ensure +that only the necessary data is exported for import into the new replica. +

+ +

+The container executing this action has access to following environment variables: +

+
    +
  • +KB_TARGET_POD_NAME: The name of the replica pod into which the data will be loaded. +
  • +
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`dataLoad`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure for importing data into a replica. +

+ +

+Use Case: +This action is intended for initializing a newly created replica with data. It involves exporting data +from an existing replica and importing it into the new, empty replica. This is essential for synchronizing +the state of replicas across the system. +

+ +

+Some database engines or associated sidecar applications (e.g., Patroni) may already provide this functionality. +In such cases, this action may not be required. +

+ +

+Data should be received through stdin. If any error occurs during the process, +the action must be able to guarantee idempotence to allow for retries from the beginning. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`reconfigure`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure that updates a replica with new configuration. +

+ +

+Note: This field is immutable once it has been set. +

+ +

+This Action is reserved for future versions. +

+ +
+ +`accountProvision`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure to generate a new database account. +

+ +

+Use Case: +This action is designed to create system accounts that are utilized for replication, monitoring, backup, +and other administrative tasks. +

+ +

+The container executing this action has access to following variables: +

+
    +
  • +KB_ACCOUNT_NAME: The name of the system account to be manipulated. +
  • +
  • +KB_ACCOUNT_PASSWORD: The password for the system account. +
  • +
  • +KB_ACCOUNT_STATEMENT: The statement used to manipulate the system account. +
  • +
+ +

+Note: This field is immutable once it has been set. +

+ +
+

+ComponentPhase +(`string` alias) +

+ +

+ +(Appears on:ClusterComponentStatus, ComponentStatus) + +

+
+ +

+ComponentPhase defines the phase of the Component within the .status.phase field. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Creating" +

+
+ +

+CreatingComponentPhase indicates the component is currently being created. +

+ +
+ +

+"Deleting" +

+
+ +

+DeletingComponentPhase indicates the component is currently being deleted. +

+ +
+ +

+"Failed" +

+
+ +

+FailedComponentPhase indicates that there are some pods of the component not in a ‘Running’ state. +

+ +
+ +

+"Running" +

+
+ +

+RunningComponentPhase indicates that all pods of the component are up-to-date and in a ‘Running’ state. +

+ +
+ +

+"Starting" +

+
+ +

+StartingComponentPhase indicates the component is currently being started. +

+ +
+ +

+"Stopped" +

+
+ +

+StoppedComponentPhase indicates the component is stopped. +

+ +
+ +

+"Stopping" +

+
+ +

+StoppingComponentPhase indicates the component is currently being stopped. +

+ +
+ +

+"Updating" +

+
+ +

+UpdatingComponentPhase indicates the component is currently being updated. +

+ +
+

+ComponentService + +

+ +

+ +(Appears on:ComponentDefinitionSpec, ComponentSpec) + +

+
+ +

+ComponentService defines a service that would be exposed as an inter-component service within a Cluster. +A Service defined in the ComponentService is expected to be accessed by other Components within the same Cluster. +

+ +

+When a Component needs to use a ComponentService provided by another Component within the same Cluster, +it can declare a variable in the `componentDefinition.spec.vars` section and bind it to the specific exposed address +of the ComponentService using the `serviceVarRef` field. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`Service`
+ + +Service + + + +
+ + +

+ +(Members of `Service` are embedded into this type.) + +

+ +
+ +`podService`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to create a corresponding Service for each Pod of the selected Component. +When set to true, a set of Services will be automatically generated for each Pod, +and the `roleSelector` field will be ignored. +

+ +

+The names of the generated Services will follow the same suffix naming pattern: `$(serviceName)-$(podOrdinal)`. +The total number of generated Services will be equal to the number of replicas specified for the Component. +

+ +

+Example usage: +

+
+
+name: my-service
+serviceName: my-service
+podService: true
+disableAutoProvision: true
+spec:
+  type: NodePort
+  ports:
+  - name: http
+    port: 80
+    targetPort: 8080
+
+
+ +

+In this example, if the Component has 3 replicas, three Services will be generated: +- my-service-0: Points to the first Pod (podOrdinal: 0) +- my-service-1: Points to the second Pod (podOrdinal: 1) +- my-service-2: Points to the third Pod (podOrdinal: 2) +

+ +

+Each generated Service will have the specified spec configuration and will target its respective Pod. +

+ +

+This feature is useful when you need to expose each Pod of a Component individually, allowing external access +to specific instances of the Component. +

+ +
+ +`disableAutoProvision`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the automatic provisioning of the service should be disabled. +

+ +

+If set to true, the service will not be automatically created at the component provisioning. +Instead, you can enable the creation of this service by specifying it explicitly in the cluster API. +

+ +
+

+ComponentSpec + +

+ +

+ +(Appears on:Component) + +

+
+ +

+ComponentSpec defines the desired state of Component +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`terminationPolicy`
+ + +TerminationPolicyType + + + +
+ +(Optional) + +

+Specifies the behavior when a Component is deleted. +

+ +
+ +`compDef`
+ +string + + +
+ + +

+Specifies the name of the referenced ComponentDefinition. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +
+ +`serviceRefs`
+ + +[]ServiceRef + + + +
+ +(Optional) + +

+Defines a list of ServiceRef for a Component, enabling access to both external services and +Services provided by other Clusters. +

+ +

+Types of services: +

+
    +
  • +External services: Not managed by KubeBlocks or managed by a different KubeBlocks operator; +Require a ServiceDescriptor for connection details. +
  • +
  • +Services provided by a Cluster: Managed by the same KubeBlocks operator; +identified using Cluster, Component and Service names. +
  • +
+ +

+ServiceRefs with identical `serviceRef.name` in the same Cluster are considered the same. +

+ +

+Example: +

+
+
+serviceRefs:
+  - name: "redis-sentinel"
+    serviceDescriptor:
+      name: "external-redis-sentinel"
+  - name: "postgres-cluster"
+    clusterServiceSelector:
+      cluster: "my-postgres-cluster"
+      service:
+        component: "postgresql"
+
+
+ +

+The example above includes ServiceRefs to an external Redis Sentinel service and a PostgreSQL Cluster. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to add. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resources required by the Component. +It allows defining the CPU, memory requirements and limits for the Component’s containers. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for the Component. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for the Component. +

+ +
+ +`persistentVolumeClaimRetentionPolicy`
+ + +PersistentVolumeClaimRetentionPolicy + + + +
+ +(Optional) + +

+persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent +volume claims created from volumeClaimTemplates. By default, all persistent +volume claims are created as needed and retained until manually deleted. This +policy allows the lifecycle to be altered, for example by deleting persistent +volume claims when their workload is deleted, or when their pod is scaled +down. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+List of volumes to override. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Overrides Services defined in referenced ComponentDefinition. +

+ +
+ +`systemAccounts`
+ + +[]ComponentSystemAccount + + + +
+ +(Optional) + +

+Overrides system accounts defined in referenced ComponentDefinition. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the desired number of replicas in the Component for enhancing availability and durability, or load balancing. +

+ +
+ +`configs`
+ + +[]ClusterComponentConfig + + + +
+ +(Optional) + +

+Specifies the configuration content of a config template. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceAccount required by the running Component. +This ServiceAccount is used to grant necessary permissions for the Component’s Pods to interact +with other Kubernetes resources, such as modifying Pod labels or sending events. +

+ +

+If not specified, KubeBlocks automatically creates a default ServiceAccount named +“kb-{componentdefinition.name}”, bound to a role with rules defined in ComponentDefinition’s +`policyRules` field. If needed (currently this means if any lifecycleAction is enabled), +it will also be bound to a default role named +“kubeblocks-cluster-pod-role”, which is installed together with KubeBlocks. +If multiple components use the same ComponentDefinition, they will share one ServiceAccount. +

+ +

+If the field is not empty, the specified ServiceAccount will be used, and KubeBlocks will not +create a ServiceAccount. But KubeBlocks does create RoleBindings for the specified ServiceAccount. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default Concurrency is 100%. +

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +`StrictInPlace` indicates that only in-place upgrades are allowed. +Any attempt to modify other fields will be rejected. +
  • +
  • +`PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod. +If that fails, it will fall back to the ReCreate, where pod will be recreated. +Default value is “PreferInPlace” +
  • +
+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Provides fine-grained control over the spec update process of all instances. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`tlsConfig`
+ + +TLSConfig + + + +
+ +(Optional) + +

+Specifies the TLS configuration for the Component, including: +

+
    +
  • +A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) for secure communication. +
  • +
  • +An optional field that specifies the configuration for the TLS certificates issuer when TLS is enabled. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +
  • +
+ +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Allows for the customization of configuration values for each instance within a Component. +An Instance represent a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps). +While instances typically share a common configuration as defined in the ClusterComponentSpec, +they can require unique settings in various scenarios: +

+ +

+For example: +- A database Component might require different resource allocations for primary and secondary instances, + with primaries needing more resources. +- During a rolling upgrade, a Component may first update the image for one or a few instances, +and then update the remaining instances after verifying that the updated instances are functioning correctly. +

+ +

+InstanceTemplate allows for specifying these unique configurations per instance. +Each instance’s name is constructed using the pattern: $(component.name)-$(template.name)-$(ordinal), +starting with an ordinal of 0. +It is crucial to maintain unique names for each InstanceTemplate to avoid conflicts. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the Component. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated Pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the Cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Defines runtimeClassName for all Pods managed by this Component. +

+ +
+ +`disableExporter`
+ +bool + + +
+ +(Optional) + +

+Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will not be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`stop`
+ +bool + + +
+ +(Optional) + +

+Stop the Component. +If set, all the computing resources will be released. +

+ +
+ +`sidecars`
+ + +[]Sidecar + + + +
+ +(Optional) + +

+Specifies the sidecars to be injected into the Component. +

+ +
+

+ComponentStatus + +

+ +

+ +(Appears on:Component) + +

+
+ +

+ComponentStatus represents the observed state of a Component within the Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Specifies the most recent generation observed for this Component object. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents a list of detailed status of the Component object. +Each condition in the list provides real-time information about certain aspect of the Component object. +

+ +

+This field is crucial for administrators and developers to monitor and respond to changes within the Component. +It provides a history of state transitions and a snapshot of the current state that can be used for +automated logic or direct inspection. +

+ +
+ +`phase`
+ + +ComponentPhase + + + +
+ + +

+Indicates the current phase of the Component, with each phase indicating specific conditions: +

+
    +
  • +Creating: The initial phase for new Components, transitioning from ‘empty’(“”). +
  • +
  • +Running: All Pods are up-to-date and in a Running state. +
  • +
  • +Updating: The Component is currently being updated, with no failed Pods present. +
  • +
  • +Failed: A significant number of Pods have failed. +
  • +
  • +Stopping: All Pods are being terminated, with current replica count at zero. +
  • +
  • +Stopped: All associated Pods have been successfully deleted. +
  • +
  • +Starting: Pods are being started. +
  • +
  • +Deleting: The Component is being deleted. +
  • +
+ +
+ +`message`
+ +map[string]string + + +
+ +(Optional) + +

+A map that stores detailed message about the Component. +Each entry in the map provides insights into specific elements of the Component, such as Pods or workloads. +

+ +

+Keys in this map are formatted as `ObjectKind/Name`, where `ObjectKind` could be a type like Pod, +and `Name` is the specific name of the object. +

+ +
+

+ComponentSystemAccount + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the system account. +

+ +
+ +`disabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the system account is disabled. +

+ +
+ +`passwordConfig`
+ + +PasswordConfig + + + +
+ +(Optional) + +

+Specifies the policy for generating the account’s password. +

+ +

+This field is immutable once set. +

+ +
+ +`secretRef`
+ + +ProvisionSecretRef + + + +
+ +(Optional) + +

+Refers to the secret from which data will be copied to create the new account. +

+ +

+For user-specified passwords, the maximum length is limited to 64 bytes. +

+ +

+This field is immutable once set. +

+ +
+

+ComponentVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ComponentVarSelector selects a var from a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Component to select from. +

+ +
+ +`ComponentVars`
+ + +ComponentVars + + + +
+ + +

+ +(Members of `ComponentVars` are embedded into this type.) + +

+ +
+

+ComponentVars + +

+ +

+ +(Appears on:ComponentVarSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the name of the Component object. +

+ +
+ +`shortName`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the short name of the Component object. +

+ +
+ +`replicas`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the replicas of the component. +

+ +
+ +`podNames`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the pod name list of the component. +The value will be presented in the following format: name1,name2,… +

+ +
+ +`podFQDNs`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the pod FQDN list of the component. +The value will be presented in the following format: FQDN1,FQDN2,… +

+ +
+ +`podNamesForRole`
+ + +RoledVar + + + +
+ +(Optional) + +

+Reference to the pod name list of the component that have a specific role. +The value will be presented in the following format: name1,name2,… +

+ +
+ +`podFQDNsForRole`
+ + +RoledVar + + + +
+ +(Optional) + +

+Reference to the pod FQDN list of the component that have a specific role. +The value will be presented in the following format: FQDN1,FQDN2,… +

+ +
+

+ComponentVersionCompatibilityRule + +

+ +

+ +(Appears on:ComponentVersionSpec) + +

+
+ +

+ComponentVersionCompatibilityRule defines the compatibility between a set of component definitions and a set of releases. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compDefs`
+ +[]string + + +
+ + +

+CompDefs specifies names for the component definitions associated with this ComponentVersion. +Each name in the list can represent an exact name, a name prefix, or a regular expression pattern. +

+ +

+For example: +

+
    +
  • +“mysql-8.0.30-v1alpha1”: Matches the exact name “mysql-8.0.30-v1alpha1” +
  • +
  • +“mysql-8.0.30”: Matches all names starting with “mysql-8.0.30” +
  • +
  • +”^mysql-8.0.\d{1,2}$“: Matches all names starting with “mysql-8.0.” followed by one or two digits. +
  • +
+ +
+ +`releases`
+ +[]string + + +
+ + +

+Releases is a list of identifiers for the releases. +

+ +
+

+ComponentVersionRelease + +

+ +

+ +(Appears on:ComponentVersionSpec) + +

+
+ +

+ComponentVersionRelease represents a release of component instances within a ComponentVersion. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name is a unique identifier for this release. +Cannot be updated. +

+ +
+ +`changes`
+ +string + + +
+ +(Optional) + +

+Changes provides information about the changes made in this release. +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+ServiceVersion defines the version of the well-known service that the component provides. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +If the release is used, it will serve as the service version for component instances, overriding the one defined in the component definition. +Cannot be updated. +

+ +
+ +`images`
+ +map[string]string + + +
+ + +

+Images define the new images for containers, actions or external applications within the release. +

+ +

+If an image is specified for a lifecycle action, the key should be the field name (case-insensitive) of +the action in the LifecycleActions struct. +

+ +
+

+ComponentVersionSpec + +

+ +

+ +(Appears on:ComponentVersion) + +

+
+ +

+ComponentVersionSpec defines the desired state of ComponentVersion +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compatibilityRules`
+ + +[]ComponentVersionCompatibilityRule + + + +
+ + +

+CompatibilityRules defines compatibility rules between sets of component definitions and releases. +

+ +
+ +`releases`
+ + +[]ComponentVersionRelease + + + +
+ + +

+Releases represents different releases of component instances within this ComponentVersion. +

+ +
+

+ComponentVersionStatus + +

+ +

+ +(Appears on:ComponentVersion) + +

+
+ +

+ComponentVersionStatus defines the observed state of ComponentVersion +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+ObservedGeneration is the most recent generation observed for this ComponentVersion. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Phase valid values are ``, `Available`, `Unavailable`. +Available means the ComponentVersion has become available, and can be used for co-related objects. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Extra message for current phase. +

+ +
+ +`serviceVersions`
+ +string + + +
+ +(Optional) + +

+ServiceVersions represent the supported service versions of this ComponentVersion. +

+ +
+

+ComponentVolume + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the volume. +It must be a DNS_LABEL and unique within the pod. +More info can be found at: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +Note: This field cannot be updated. +

+ +
+ +`needSnapshot`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the creation of a snapshot of this volume is necessary when performing a backup of the Component. +

+ +

+Note: This field cannot be updated. +

+ +
+ +`highWatermark`
+ +int + + +
+ +(Optional) + +

+Sets the critical threshold for volume space utilization as a percentage (0-100). +

+ +

+Exceeding this percentage triggers the system to switch the volume to read-only mode as specified in +`componentDefinition.spec.lifecycleActions.readOnly`. +This precaution helps prevent space depletion while maintaining read-only access. +If the space utilization later falls below this threshold, the system reverts the volume to read-write mode +as defined in `componentDefinition.spec.lifecycleActions.readWrite`, restoring full functionality. +

+ +

+Note: This field cannot be updated. +

+ +
+

+ConnectionCredentialAuth + +

+ +

+ +(Appears on:ServiceDescriptorSpec) + +

+
+ +

+ConnectionCredentialAuth specifies the authentication credentials required for accessing an external service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`username`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the username for the external service. +

+ +
+ +`password`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the password for the external service. +

+ +
+

+ContainerVars + +

+ +

+ +(Appears on:HostNetworkVars) + +

+
+ +

+ContainerVars defines the vars that can be referenced from a Container. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the container. +

+ +
+ +`port`
+ + +NamedVar + + + +
+ +(Optional) + +

+Container port to reference. +

+ +
+

+CredentialVar + +

+ +

+ +(Appears on:ConnectionCredentialAuth, ServiceDescriptorSpec) + +

+
+ +

+CredentialVar represents a variable that retrieves its value either directly from a specified expression +or from a source defined in `valueFrom`. +Only one of these options may be used at a time. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`value`
+ +string + + +
+ +(Optional) + +

+Holds a direct string or an expression that can be evaluated to a string. +

+ +

+It can include variables denoted by $(VAR_NAME). +These variables are expanded to the value of the environment variables defined in the container. +If a variable cannot be resolved, it remains unchanged in the output. +

+ +

+To escape variable expansion and retain the literal value, use double $ characters. +

+ +

+For example: +

+
    +
  • +”$(VAR_NAME)” will be expanded to the value of the environment variable VAR_NAME. +
  • +
  • +”$$(VAR_NAME)” will result in “$(VAR_NAME)” in the output, without any variable expansion. +
  • +
+ +

+Default value is an empty string. +

+ +
+ +`valueFrom`
+ + +Kubernetes core/v1.EnvVarSource + + + +
+ +(Optional) + +

+Specifies the source for the variable’s value. +

+ +
+

+CredentialVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+CredentialVarSelector selects a var from a Credential (SystemAccount). +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Credential (SystemAccount) to select from. +

+ +
+ +`CredentialVars`
+ + +CredentialVars + + + +
+ + +

+ +(Members of `CredentialVars` are embedded into this type.) + +

+ +
+

+CredentialVars + +

+ +

+ +(Appears on:CredentialVarSelector, ServiceRefVars) + +

+
+ +

+CredentialVars defines the vars that can be referenced from a Credential (SystemAccount). +!!!!! CredentialVars will only be used as environment variables for Pods & Actions, and will not be used to render the templates. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`username`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`password`
+ + +VarOption + + + +
+ +(Optional) + +
+

+EnvVar + +

+ +

+ +(Appears on:ComponentDefinitionSpec, SidecarDefinitionSpec) + +

+
+ +

+EnvVar represents a variable present in the env of Pod/Action or the template of config/script. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name of the variable. Must be a C_IDENTIFIER. +

+ +
+ +`value`
+ +string + + +
+ +(Optional) + +

+Variable references `$(VAR_NAME)` are expanded using the previously defined variables in the current context. +

+ +

+If a variable cannot be resolved, the reference in the input string will be unchanged. +Double `$$` are reduced to a single `$`, which allows for escaping the `$(VAR_NAME)` syntax: i.e. +

+
    +
  • +`$$(VAR_NAME)` will produce the string literal `$(VAR_NAME)`. +
  • +
+ +

+Escaped references will never be expanded, regardless of whether the variable exists or not. +Defaults to “”. +

+ +
+ +`valueFrom`
+ + +VarSource + + + +
+ +(Optional) + +

+Source for the variable’s value. Cannot be used if value is not empty. +

+ +
+ +`expression`
+ +string + + +
+ +(Optional) + +

+A Go template expression that will be applied to the resolved value of the var. +

+ +

+The expression will only be evaluated if the var is successfully resolved to a non-credential value. +

+ +

+The resolved value can be accessed by its name within the expression, system vars and other user-defined +non-credential vars can be used within the expression in the same way. +Notice that, when accessing vars by its name, you should replace all the “-” in the name with “_”, because of +that “-” is not a valid identifier in Go. +

+ +

+All expressions are evaluated in the order the vars are defined. If a var depends on any vars that also +have expressions defined, be careful about the evaluation order as it may use intermediate values. +

+ +

+The result of evaluation will be used as the final value of the var. If the expression fails to evaluate, +the resolving of var will also be considered failed. +

+ +
+

+ExecAction + +

+ +

+ +(Appears on:Action) + +

+
+ +

+ExecAction describes an Action that executes a command inside a container. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`image`
+ +string + + +
+ +(Optional) + +

+Specifies the container image to be used for running the Action. +

+ +

+When specified, a dedicated container will be created using this image to execute the Action. +All actions with same image will share the same container. +

+ +

+This field cannot be updated. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Represents a list of environment variables that will be injected into the container. +These variables enable the container to adapt its behavior based on the environment it’s running in. +

+ +

+This field cannot be updated. +

+ +
+ +`command`
+ +[]string + + +
+ +(Optional) + +

+Specifies the command to be executed inside the container. +The working directory for this command is the container’s root directory (‘/’). +Commands are executed directly without a shell environment, meaning shell-specific syntax (‘|’, etc.) is not supported. +If the shell is required, it must be explicitly invoked in the command. +

+ +

+A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. +

+ +
+ +`args`
+ +[]string + + +
+ +(Optional) + +

+Args represents the arguments that are passed to the `command` for execution. +

+ +
+ +`targetPodSelector`
+ + +TargetPodSelector + + + +
+ +(Optional) + +

+Defines the criteria used to select the target Pod(s) for executing the Action. +This is useful when there is no default target replica identified. +It allows for precise control over which Pod(s) the Action should run in. +

+ +

+If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod +to be removed or added; or a random pod if the Action is triggered at the component level, such as +post-provision or pre-terminate of the component. +

+ +

+This field cannot be updated. +

+ +
+ +`matchingKey`
+ +string + + +
+ +(Optional) + +

+Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. +The impact of this field depends on the `targetPodSelector` value: +

+
    +
  • +When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. +
  • +
  • +When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` +will be selected for the Action. +
  • +
+ +

+This field cannot be updated. +

+ +
+ +`container`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the container within the same pod whose resources will be shared with the action. +This allows the action to utilize the specified container’s resources without executing within it. +

+ +

+The name must match one of the containers defined in `componentDefinition.spec.runtime`. +

+ +

+The resources that can be shared are included: +

+
    +
  • +volume mounts +
  • +
+ +

+This field cannot be updated. +

+ +
+

+Exporter + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`containerName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the built-in metrics exporter container. +

+ +
+ +`scrapePath`
+ +string + + +
+ +(Optional) + +

+Specifies the http/https url path to scrape for metrics. +If empty, Prometheus uses the default value (e.g. `/metrics`). +

+ +
+ +`scrapePort`
+ +string + + +
+ +(Optional) + +

+Specifies the port name to scrape for metrics. +

+ +
+ +`scrapeScheme`
+ + +PrometheusScheme + + + +
+ +(Optional) + +

+Specifies the schema to use for scraping. +`http` and `https` are the expected values unless you rewrite the `__scheme__` label via relabeling. +If empty, Prometheus uses the default value `http`. +

+ +
+

+HostNetwork + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`containerPorts`
+ + +[]HostNetworkContainerPort + + + +
+ +(Optional) + +

+The list of container ports that are required by the component. +

+ +
+

+HostNetworkContainerPort + +

+ +

+ +(Appears on:HostNetwork) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`container`
+ +string + + +
+ + +

+Container specifies the target container within the Pod. +

+ +
+ +`ports`
+ +[]string + + +
+ + +

+Ports are named container ports within the specified container. +These container ports must be defined in the container for proper port allocation. +

+ +
+

+HostNetworkVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+HostNetworkVarSelector selects a var from host-network resources. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The component to select from. +

+ +
+ +`HostNetworkVars`
+ + +HostNetworkVars + + + +
+ + +

+ +(Members of `HostNetworkVars` are embedded into this type.) + +

+ +
+

+HostNetworkVars + +

+ +

+ +(Appears on:HostNetworkVarSelector) + +

+
+ +

+HostNetworkVars defines the vars that can be referenced from host-network resources. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`container`
+ + +ContainerVars + + + +
+ +(Optional) + +
+

+InstanceTemplate + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+ +

+InstanceTemplate allows customization of individual replica configurations in a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name specifies the unique name of the instance Pod created using this InstanceTemplate. +This name is constructed by concatenating the Component’s name, the template’s name, and the instance’s ordinal +using the pattern: $(cluster.name)-$(component.name)-$(template.name)-$(ordinal). Ordinals start from 0. +The specified name overrides any default naming conventions or patterns. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of instances (Pods) to create from this InstanceTemplate. +This field allows setting how many replicated instances of the Component, +with the specific overrides in the InstanceTemplate, are created. +The default value is 1. A value of 0 disables instance creation. +

+ +
+ +`ordinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of this InstanceTemplate. +The Ordinals used to specify the ordinal of the instance (pod) names to be generated under this InstanceTemplate. +

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]}, +then the instance names generated under this InstanceTemplate would be +$(cluster.name)-$(component.name)-$(template.name)-0, $(cluster.name)-$(component.name)-$(template.name)-1 and +$(cluster.name)-$(component.name)-$(template.name)-7 +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs to be merged into the Pod’s existing annotations. +Existing keys will have their values overwritten, while new keys will be added to the annotations. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs that will be merged into the Pod’s existing labels. +Values for existing keys will be overwritten, and new keys will be added. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the instance. +If defined, it will overwrite the scheduling policy defined in ClusterSpec and/or ClusterComponentSpec. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies an override for the resource requirements of the first container in the Pod. +This field allows for customizing resource allocation (CPU, memory, etc.) for the container. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Defines Env to override. +Add new or override existing envs. +

+ +
+

+InstanceUpdateStrategy + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec, InstanceSetSpec) + +

+
+ +

+InstanceUpdateStrategy defines fine-grained control over the spec update process of all instances. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`type`
+ + +InstanceUpdateStrategyType + + + +
+ +(Optional) + +

+Indicates the type of the update strategy. +Default is RollingUpdate. +

+ +
+ +`rollingUpdate`
+ + +RollingUpdate + + + +
+ +(Optional) + +

+Specifies how the rolling update should be applied. +

+ +
+

+InstanceUpdateStrategyType +(`string` alias) +

+ +

+ +(Appears on:InstanceUpdateStrategy) + +

+
+ +

+InstanceUpdateStrategyType is a string enumeration type that enumerates +all possible update strategies for the KubeBlocks controllers. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"OnDelete" +

+
+ +

+OnDeleteStrategyType indicates that ordered rolling restarts are disabled. Instances are recreated +when they are manually deleted. +

+ +
+ +

+"RollingUpdate" +

+
+ +

+RollingUpdateStrategyType indicates that update will be +applied to all Instances with respect to the workload +ordering constraints. +

+ +
+

+Issuer + +

+ +

+ +(Appears on:ClusterComponentSpec, TLSConfig) + +

+
+ +

+Issuer defines the TLS certificates issuer for the Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ + +IssuerName + + + +
+ + +

+The issuer for TLS certificates. +It only allows two enum values: `KubeBlocks` and `UserProvided`. +

+
    +
  • +`KubeBlocks` indicates that the self-signed TLS certificates generated by the KubeBlocks Operator will be used. +
  • +
  • +`UserProvided` means that the user is responsible for providing their own CA, Cert, and Key. +In this case, the user-provided CA certificate, server certificate, and private key will be used +for TLS communication. +
  • +
+ +
+ +`secretRef`
+ + +TLSSecretRef + + + +
+ +(Optional) + +

+SecretRef is the reference to the secret that contains user-provided certificates. +It is required when the issuer is set to `UserProvided`. +

+ +
+

+IssuerName +(`string` alias) +

+ +

+ +(Appears on:Issuer) + +

+
+ +

+IssuerName defines the name of the TLS certificates issuer. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"KubeBlocks" +

+
+ +

+IssuerKubeBlocks represents certificates that are signed by the KubeBlocks Operator. +

+ +
+ +

+"UserProvided" +

+
+ +

+IssuerUserProvided indicates that the user has provided their own CA-signed certificates. +

+ +
+

+LetterCase +(`string` alias) +

+ +

+ +(Appears on:PasswordConfig) + +

+
+ +

+LetterCase defines the available cases to be used in password generation. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"LowerCases" +

+
+ +

+LowerCases represents the use of lower case letters only. +

+ +
+ +

+"MixedCases" +

+
+ +

+MixedCases represents the use of a mix of both lower and upper case letters. +

+ +
+ +

+"UpperCases" +

+
+ +

+UpperCases represents the use of upper case letters only. +

+ +
+

+LogConfig + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies a descriptive label for the log type, such as ‘slow’ for a MySQL slow log file. +It provides a clear identification of the log’s purpose and content. +

+ +
+ +`filePathPattern`
+ +string + + +
+ + +

+Specifies the paths or patterns identifying where the log files are stored. +This field allows the system to locate and manage log files effectively. +

+ +

+Examples: +

+
    +
  • +/home/postgres/pgdata/pgroot/data/log/postgresql-* +
  • +
  • +/data/mysql/log/mysqld-error.log +
  • +
+ +
+

+MultipleClusterObjectCombinedOption + +

+ +

+ +(Appears on:MultipleClusterObjectOption) + +

+
+ +

+MultipleClusterObjectCombinedOption defines options for handling combined variables. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`newVarSuffix`
+ +string + + +
+ +(Optional) + +

+If set, the existing variable will be kept, and a new variable will be defined with the specified suffix +in pattern: $(var.name)_$(suffix). +The new variable will be auto-created and placed behind the existing one. +If not set, the existing variable will be reused with the value format defined below. +

+ +
+ +`valueFormat`
+ + +MultipleClusterObjectValueFormat + + + +
+ +(Optional) + +

+The format of the value that the operator will use to compose values from multiple components. +

+ +
+ +`flattenFormat`
+ + +MultipleClusterObjectValueFormatFlatten + + + +
+ +(Optional) + +

+The flatten format, default is: $(comp-name-1):value,$(comp-name-2):value. +

+ +
+

+MultipleClusterObjectOption + +

+ +

+ +(Appears on:ClusterObjectReference) + +

+
+ +

+MultipleClusterObjectOption defines the options for handling multiple cluster objects matched. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`requireAllComponentObjects`
+ +bool + + +
+ +(Optional) + +

+RequireAllComponentObjects controls whether all component objects must exist before resolving. +If set to true, resolving will only proceed if all component objects are present. +

+ +
+ +`strategy`
+ + +MultipleClusterObjectStrategy + + + +
+ + +

+Define the strategy for handling multiple cluster objects. +

+ +
+ +`combinedOption`
+ + +MultipleClusterObjectCombinedOption + + + +
+ +(Optional) + +

+Define the options for handling combined variables. +Valid only when the strategy is set to “combined”. +

+ +
+

+MultipleClusterObjectStrategy +(`string` alias) +

+ +

+ +(Appears on:MultipleClusterObjectOption) + +

+
+ +

+MultipleClusterObjectStrategy defines the strategy for handling multiple cluster objects. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"combined" +

+
+ +

+MultipleClusterObjectStrategyCombined - the values from all matched components will be combined into a single +variable using the specified option. +

+ +
+ +

+"individual" +

+
+ +

+MultipleClusterObjectStrategyIndividual - each matched component will have its individual variable with its name +as the suffix. +This is required when referencing credential variables that cannot be passed by values. +

+ +
+

+MultipleClusterObjectValueFormat +(`string` alias) +

+ +

+ +(Appears on:MultipleClusterObjectCombinedOption) + +

+
+ +

+MultipleClusterObjectValueFormat defines the format details for the value. +

+
+ + + + + + + + + + + + + + +
ValueDescription
+ +

+"Flatten" +

+
+ +
+

+MultipleClusterObjectValueFormatFlatten + +

+ +

+ +(Appears on:MultipleClusterObjectCombinedOption) + +

+
+ +

+MultipleClusterObjectValueFormatFlatten defines the flatten format for the value. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`delimiter`
+ +string + + +
+ + +

+Pair delimiter. +

+ +
+ +`keyValueDelimiter`
+ +string + + +
+ + +

+Key-value delimiter. +

+ +
+

+NamedVar + +

+ +

+ +(Appears on:ContainerVars, ResourceVars, ServiceVars) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +
+ +`option`
+ + +VarOption + + + +
+ +(Optional) + +
+

+Ordinals + +

+ +

+ +(Appears on:InstanceTemplate, InstanceSetSpec, InstanceTemplate) + +

+
+ +

+Ordinals represents a combination of continuous segments and individual values. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ranges`
+ + +[]Range + + + +
+ + +
+ +`discrete`
+ +[]int32 + + +
+ + +
+

+PasswordConfig + +

+ +

+ +(Appears on:ComponentSystemAccount, SystemAccount) + +

+
+ +

+PasswordConfig helps customize the complexity of the password generation pattern. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`length`
+ +int32 + + +
+ +(Optional) + +

+The length of the password. +

+ +
+ +`numDigits`
+ +int32 + + +
+ +(Optional) + +

+The number of digits in the password. +

+ +
+ +`numSymbols`
+ +int32 + + +
+ +(Optional) + +

+The number of symbols in the password. +

+ +
+ +`letterCase`
+ + +LetterCase + + + +
+ +(Optional) + +

+The case of the letters in the password. +

+ +
+ +`seed`
+ +string + + +
+ +(Optional) + +

+Seed to generate the account’s password. +Cannot be updated. +

+ +
+

+PersistentVolumeClaimRetentionPolicy + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec, InstanceSetSpec) + +

+
+ +

+PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the VolumeClaimTemplates. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`whenDeleted`
+ + +PersistentVolumeClaimRetentionPolicyType + + + +
+ +(Optional) + +

+WhenDeleted specifies what happens to PVCs created from VolumeClaimTemplates when the workload is deleted. +The `Retain` policy causes PVCs to not be affected by workload deletion. +The default policy of `Delete` causes those PVCs to be deleted. +

+ +
+ +`whenScaled`
+ + +PersistentVolumeClaimRetentionPolicyType + + + +
+ +(Optional) + +

+WhenScaled specifies what happens to PVCs created from VolumeClaimTemplates when the workload is scaled down. +The `Retain` policy causes PVCs to not be affected by a scale down. +The default policy of `Delete` causes the associated PVCs for pods scaled down to be deleted. +

+ +
+

+PersistentVolumeClaimRetentionPolicyType +(`string` alias) +

+ +

+ +(Appears on:PersistentVolumeClaimRetentionPolicy) + +

+
+ +

+PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine +when volumes from the VolumeClaimTemplates will be deleted when the controlling StatefulSet is +deleted or scaled down. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Delete" +

+
+ +

+DeletePersistentVolumeClaimRetentionPolicyType specifies that PersistentVolumeClaims associated with +VolumeClaimTemplates will be deleted in the scenario specified in PersistentVolumeClaimRetentionPolicy. +

+ +
+ +

+"Retain" +

+
+ +

+RetainPersistentVolumeClaimRetentionPolicyType is the default PersistentVolumeClaimRetentionPolicy +and specifies that PersistentVolumeClaims associated with VolumeClaimTemplates will not be deleted. +

+ +
+

+PersistentVolumeClaimSpec + +

+ +

+ +(Appears on:ClusterComponentVolumeClaimTemplate) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`accessModes`
+ + +[]Kubernetes core/v1.PersistentVolumeAccessMode + + + +
+ +(Optional) + +

+Contains the desired access modes the volume should have. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.VolumeResourceRequirements + + + +
+ +(Optional) + +

+Represents the minimum resources the volume should have. +If the RecoverVolumeExpansionFailure feature is enabled, users are allowed to specify resource requirements that +are lower than the previous value but must still be higher than the capacity recorded in the status field of the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources. +

+ +
+ +`storageClassName`
+ +string + + +
+ +(Optional) + +

+The name of the StorageClass required by the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. +

+ +
+ +`volumeMode`
+ + +Kubernetes core/v1.PersistentVolumeMode + + + +
+ +(Optional) + +

+Defines what type of volume is required by the claim, either Block or Filesystem. +

+ +
+ +`volumeAttributesClassName`
+ +string + + +
+ +(Optional) + +

+volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. +

+ +

+More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass +

+ +
+

+Phase +(`string` alias) +

+ +

+ +(Appears on:ClusterDefinitionStatus, ComponentDefinitionStatus, ComponentVersionStatus, ServiceDescriptorStatus, ShardingDefinitionStatus, SidecarDefinitionStatus) + +

+
+ +

+Phase represents the status of a CR. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +

+AvailablePhase indicates that a CR is in an available state. +

+ +
+ +

+"Unavailable" +

+
+ +

+UnavailablePhase indicates that a CR is in an unavailable state. +

+ +
+

+PodUpdatePolicyType +(`string` alias) +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec, InstanceSetSpec) + +

+
+ +

+PodUpdatePolicyType indicates how pods should be updated +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"PreferInPlace" +

+
+ +

+PreferInPlacePodUpdatePolicyType indicates that we will first attempt an in-place upgrade of the Pod. +If that fails, it will fall back to ReCreate, where the pod will be recreated. +

+ +
+ +

+"StrictInPlace" +

+
+ +

+StrictInPlacePodUpdatePolicyType indicates that only in-place upgrades are allowed. +Any attempt to modify other fields will be rejected. +

+ +
+

+PreConditionType +(`string` alias) +

+ +

+ +(Appears on:Action) + +

+
+ +

+PreConditionType defines the preCondition type of the action execution. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"ClusterReady" +

+
+ +
+ +

+"ComponentReady" +

+
+ +
+ +

+"Immediately" +

+
+ +
+ +

+"RuntimeReady" +

+
+ +
+

+Probe + +

+ +

+ +(Appears on:ComponentLifecycleActions) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`Action`
+ + +Action + + + +
+ + +

+ +(Members of `Action` are embedded into this type.) + +

+ +
+ +`initialDelaySeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds to wait after the container has started before the RoleProbe +begins to detect the container’s role. +

+ +
+ +`periodSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the frequency at which the probe is conducted. This value is expressed in seconds. +Defaults to 10 seconds. Minimum value is 1. +

+ +
+ +`successThreshold`
+ +int32 + + +
+ +(Optional) + +

+Minimum consecutive successes for the probe to be considered successful after having failed. +Defaults to 1. Minimum value is 1. +

+ +
+ +`failureThreshold`
+ +int32 + + +
+ +(Optional) + +

+Minimum consecutive failures for the probe to be considered failed after having succeeded. +Defaults to 3. Minimum value is 1. +

+ +
+

+PrometheusScheme +(`string` alias) +

+ +

+ +(Appears on:Exporter) + +

+
+ +

+PrometheusScheme defines the protocol of prometheus scrape metrics. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"http" +

+
+ +
+ +

+"https" +

+
+ +
+

+ProvisionSecretRef + +

+ +

+ +(Appears on:ComponentSystemAccount) + +

+
+ +

+ProvisionSecretRef represents the reference to a secret. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The unique identifier of the secret. +

+ +
+ +`namespace`
+ +string + + +
+ + +

+The namespace where the secret is located. +

+ +
+ +`password`
+ +string + + +
+ +(Optional) + +

+The key in the secret data that contains the password. +

+ +
+

+Range + +

+ +

+ +(Appears on:Ordinals) + +

+
+ +

+Range represents a range with a start and an end value. +It is used to define a continuous segment. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`start`
+ +int32 + + +
+ + +
+ +`end`
+ +int32 + + +
+ + +
+

+ReplicaRole + +

+ +

+ +(Appears on:ComponentDefinitionSpec, InstanceSetSpec, MemberStatus) + +

+
+ +

+ReplicaRole represents a role that can be assigned to a component instance, defining its behavior and responsibilities. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name defines the role’s unique identifier. This value is used to set the “apps.kubeblocks.io/role” label +on the corresponding object to identify its role. +

+ +

+For example, common role names include: +- “leader”: The primary/master instance that handles write operations +- “follower”: Secondary/replica instances that replicate data from the leader +- “learner”: Read-only instances that don’t participate in elections +

+ +

+This field is immutable once set. +

+ +
+ +`updatePriority`
+ +int + + +
+ +(Optional) + +

+UpdatePriority determines the order in which pods with different roles are updated. +Pods are sorted by this priority (higher numbers = higher priority) and updated accordingly. +Roles with the highest priority will be updated last. +The default priority is 0. +

+ +

+For example: +- Leader role may have priority 2 (updated last) +- Follower role may have priority 1 (updated before leader) +- Learner role may have priority 0 (updated first) +

+ +

+This field is immutable once set. +

+ +
+ +`participatesInQuorum`
+ +bool + + +
+ +(Optional) + +

+ParticipatesInQuorum indicates if pods with this role are counted when determining quorum. +This affects update strategies that need to maintain quorum for availability. Roles that participate +in quorum should have a higher update priority than roles that do not participate in quorum. +The default value is false. +

+ +

+For example, in a 5-pod component where: +- 2 learner pods (participatesInQuorum=false) +- 2 follower pods (participatesInQuorum=true) +- 1 leader pod (participatesInQuorum=true) +The quorum size would be 3 (based on the 3 participating pods), allowing parallel updates +of 2 learners and 1 follower while maintaining quorum. +

+ +

+This field is immutable once set. +

+ +
+

+ReplicasLimit + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ReplicasLimit defines the valid range of number of replicas supported. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`minReplicas`
+ +int32 + + +
+ + +

+The minimum limit of replicas. +

+ +
+ +`maxReplicas`
+ +int32 + + +
+ + +

+The maximum limit of replicas. +

+ +
+

+ResourceVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ResourceVarSelector selects a var from a kind of resource. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Component to select from. +

+ +
+ +`ResourceVars`
+ + +ResourceVars + + + +
+ + +

+ +(Members of `ResourceVars` are embedded into this type.) + +

+ +
+

+ResourceVars + +

+ +

+ +(Appears on:ResourceVarSelector) + +

+
+ +

+ResourceVars defines the vars that can be referenced from resources. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cpu`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`cpuLimit`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`memory`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`memoryLimit`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`storage`
+ + +NamedVar + + + +
+ +(Optional) + +
+

+RetryPolicy + +

+ +

+ +(Appears on:Action) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`maxRetries`
+ +int + + +
+ +(Optional) + +

+Defines the maximum number of retry attempts that should be made for a given Action. +This value is set to 0 by default, indicating that no retries will be made. +

+ +
+ +`retryInterval`
+ +time.Duration + + +
+ +(Optional) + +

+Indicates the duration of time to wait between each retry attempt. +This value is set to 0 by default, indicating that there will be no delay between retry attempts. +

+ +
+

+RoledVar + +

+ +

+ +(Appears on:ComponentVars) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`role`
+ +string + + +
+ +(Optional) + +
+ +`option`
+ + +VarOption + + + +
+ +(Optional) + +
+

+RollingUpdate + +

+ +

+ +(Appears on:InstanceUpdateStrategy) + +

+
+ +

+RollingUpdate specifies how the rolling update should be applied. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicas`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Indicates the number of instances that should be updated during a rolling update. +The remaining instances will remain untouched. This is helpful in defining how many instances +should participate in the update process. +Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%). +Absolute number is calculated from percentage by rounding up. +The default value is ComponentSpec.Replicas (i.e., update all instances). +

+ +
+ +`maxUnavailable`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+The maximum number of instances that can be unavailable during the update. +Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%). +Absolute number is calculated from percentage by rounding up. This can not be 0. +Defaults to 1. The field applies to all instances. That means if there is any unavailable pod, +it will be counted towards MaxUnavailable. +

+ +
+

+SchedulingPolicy + +

+ +

+ +(Appears on:ClusterComponentSpec, ClusterSpec, ComponentSpec, InstanceTemplate, InstanceTemplate) + +

+
+ +

+SchedulingPolicy defines the scheduling policy for instances. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`schedulerName`
+ +string + + +
+ +(Optional) + +

+If specified, the Pod will be dispatched by specified scheduler. +If not specified, the Pod will be dispatched by default scheduler. +

+ +
+ +`nodeSelector`
+ +map[string]string + + +
+ +(Optional) + +

+NodeSelector is a selector which must be true for the Pod to fit on a node. +Selector which must match a node’s labels for the Pod to be scheduled on that node. +More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +

+ +
+ +`nodeName`
+ +string + + +
+ +(Optional) + +

+NodeName is a request to schedule this Pod onto a specific node. If it is non-empty, +the scheduler simply schedules this Pod onto that node, assuming that it fits resource +requirements. +

+ +
+ +`affinity`
+ + +Kubernetes core/v1.Affinity + + + +
+ +(Optional) + +

+Specifies a group of affinity scheduling rules of the Cluster, including NodeAffinity, PodAffinity, and PodAntiAffinity. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Allows Pods to be scheduled onto nodes with matching taints. +Each toleration in the array allows the Pod to tolerate node taints based on +specified `key`, `value`, `effect`, and `operator`. +

+
    +
  • +The `key`, `value`, and `effect` identify the taint that the toleration matches. +
  • +
  • +The `operator` determines how the toleration matches the taint. +
  • +
+ +

+Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. +

+ +
+ +`topologySpreadConstraints`
+ + +[]Kubernetes core/v1.TopologySpreadConstraint + + + +
+ +(Optional) + +

+TopologySpreadConstraints describes how a group of Pods ought to spread across topology +domains. Scheduler will schedule Pods in a way which abides by the constraints. +All topologySpreadConstraints are ANDed. +

+ +
+

+Service + +

+ +

+ +(Appears on:ClusterService, ComponentService) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name defines the name of the service. +This name is used to identify the service within the cluster. +Others can refer to this service by its name. (e.g., connection credential) +Cannot be updated. +

+ +
+ +`serviceName`
+ +string + + +
+ +(Optional) + +

+ServiceName defines the name of the underlying service object. +If not specified, the default service name with different patterns will be used: +

+
    +
  • +CLUSTER_NAME: for cluster-level services +
  • +
  • +CLUSTER_NAME-COMPONENT_NAME: for component-level services +
  • +
+ +

+Only one default service name is allowed. +Cannot be updated. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+If ServiceType is LoadBalancer, cloud provider related parameters can be put here +More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer. +

+ +
+ +`spec`
+ + +Kubernetes core/v1.ServiceSpec + + + +
+ +(Optional) + +

+Spec defines the behavior of a service. +https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`ports`
+ + +[]Kubernetes core/v1.ServicePort + + + +
+ + +

+The list of ports that are exposed by this service. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`selector`
+ +map[string]string + + +
+ +(Optional) + +

+Route service traffic to pods with label keys and values matching this +selector. If empty or not present, the service is assumed to have an +external process managing its endpoints, which Kubernetes will not +modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. +Ignored if type is ExternalName. +More info: https://kubernetes.io/docs/concepts/services-networking/service/ +

+ +
+ +`clusterIP`
+ +string + + +
+ +(Optional) + +

+clusterIP is the IP address of the service and is usually assigned +randomly. If an address is specified manually, is in-range (as per +system configuration), and is not in use, it will be allocated to the +service; otherwise creation of the service will fail. This field may not +be changed through updates unless the type field is also being changed +to ExternalName (which requires this field to be blank) or the type +field is being changed from ExternalName (in which case this field may +optionally be specified, as describe above). Valid values are “None”, +empty string (“”), or a valid IP address. Setting this to “None” makes a +“headless service” (no virtual IP), which is useful when direct endpoint +connections are preferred and proxying is not required. Only applies to +types ClusterIP, NodePort, and LoadBalancer. If this field is specified +when creating a Service of type ExternalName, creation will fail. This +field will be wiped when updating a Service to type ExternalName. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`clusterIPs`
+ +[]string + + +
+ +(Optional) + +

+ClusterIPs is a list of IP addresses assigned to this service, and are +usually assigned randomly. If an address is specified manually, is +in-range (as per system configuration), and is not in use, it will be +allocated to the service; otherwise creation of the service will fail. +This field may not be changed through updates unless the type field is +also being changed to ExternalName (which requires this field to be +empty) or the type field is being changed from ExternalName (in which +case this field may optionally be specified, as describe above). Valid +values are “None”, empty string (“”), or a valid IP address. Setting +this to “None” makes a “headless service” (no virtual IP), which is +useful when direct endpoint connections are preferred and proxying is +not required. Only applies to types ClusterIP, NodePort, and +LoadBalancer. If this field is specified when creating a Service of type +ExternalName, creation will fail. This field will be wiped when updating +a Service to type ExternalName. If this field is not specified, it will +be initialized from the clusterIP field. If this field is specified, +clients must ensure that clusterIPs[0] and clusterIP have the same +value. +

+ +

+This field may hold a maximum of two entries (dual-stack IPs, in either order). +These IPs must correspond to the values of the ipFamilies field. Both +clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`type`
+ + +Kubernetes core/v1.ServiceType + + + +
+ +(Optional) + +

+type determines how the Service is exposed. Defaults to ClusterIP. Valid +options are ExternalName, ClusterIP, NodePort, and LoadBalancer. +“ClusterIP” allocates a cluster-internal IP address for load-balancing +to endpoints. Endpoints are determined by the selector or if that is not +specified, by manual construction of an Endpoints object or +EndpointSlice objects. If clusterIP is “None”, no virtual IP is +allocated and the endpoints are published as a set of endpoints rather +than a virtual IP. +“NodePort” builds on ClusterIP and allocates a port on every node which +routes to the same endpoints as the clusterIP. +“LoadBalancer” builds on NodePort and creates an external load-balancer +(if supported in the current cloud) which routes to the same endpoints +as the clusterIP. +“ExternalName” aliases this service to the specified externalName. +Several other fields do not apply to ExternalName services. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types +

+ +
+ +`externalIPs`
+ +[]string + + +
+ +(Optional) + +

+externalIPs is a list of IP addresses for which nodes in the cluster +will also accept traffic for this service. These IPs are not managed by +Kubernetes. The user is responsible for ensuring that traffic arrives +at a node with this IP. A common example is external load-balancers +that are not part of the Kubernetes system. +

+ +
+ +`sessionAffinity`
+ + +Kubernetes core/v1.ServiceAffinity + + + +
+ +(Optional) + +

+Supports “ClientIP” and “None”. Used to maintain session affinity. +Enable client IP based session affinity. +Must be ClientIP or None. +Defaults to None. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`loadBalancerIP`
+ +string + + +
+ +(Optional) + +

+Only applies to Service Type: LoadBalancer. +This feature depends on whether the underlying cloud-provider supports specifying +the loadBalancerIP when a load balancer is created. +This field will be ignored if the cloud-provider does not support the feature. +Deprecated: This field was under-specified and its meaning varies across implementations. +Using it is non-portable and it may not support dual-stack. +Users are encouraged to use implementation-specific annotations when available. +

+ +
+ +`loadBalancerSourceRanges`
+ +[]string + + +
+ +(Optional) + +

+If specified and supported by the platform, traffic through the cloud-provider +load-balancer will be restricted to the specified client IPs. This field will be ignored if the +cloud-provider does not support the feature. +More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ +

+ +
+ +`externalName`
+ +string + + +
+ +(Optional) + +

+externalName is the external reference that discovery mechanisms will +return as an alias for this service (e.g. a DNS CNAME record). No +proxying will be involved. Must be a lowercase RFC-1123 hostname +(https://tools.ietf.org/html/rfc1123) and requires `type` to be “ExternalName”. +

+ +
+ +`externalTrafficPolicy`
+ + +Kubernetes core/v1.ServiceExternalTrafficPolicy + + + +
+ +(Optional) + +

+externalTrafficPolicy describes how nodes distribute service traffic they +receive on one of the Service’s “externally-facing” addresses (NodePorts, +ExternalIPs, and LoadBalancer IPs). If set to “Local”, the proxy will configure +the service in a way that assumes that external load balancers will take care +of balancing the service traffic between nodes, and so each node will deliver +traffic only to the node-local endpoints of the service, without masquerading +the client source IP. (Traffic mistakenly sent to a node with no endpoints will +be dropped.) The default value, “Cluster”, uses the standard behavior of +routing to all endpoints evenly (possibly modified by topology and other +features). Note that traffic sent to an External IP or LoadBalancer IP from +within the cluster will always get “Cluster” semantics, but clients sending to +a NodePort from within the cluster may need to take traffic policy into account +when picking a node. +

+ +
+ +`healthCheckNodePort`
+ +int32 + + +
+ +(Optional) + +

+healthCheckNodePort specifies the healthcheck nodePort for the service. +This only applies when type is set to LoadBalancer and +externalTrafficPolicy is set to Local. If a value is specified, is +in-range, and is not in use, it will be used. If not specified, a value +will be automatically allocated. External systems (e.g. load-balancers) +can use this port to determine if a given node holds endpoints for this +service or not. If this field is specified when creating a Service +which does not need it, creation will fail. This field will be wiped +when updating a Service to no longer need it (e.g. changing type). +This field cannot be updated once set. +

+ +
+ +`publishNotReadyAddresses`
+ +bool + + +
+ +(Optional) + +

+publishNotReadyAddresses indicates that any agent which deals with endpoints for this +Service should disregard any indications of ready/not-ready. +The primary use case for setting this field is for a StatefulSet’s Headless Service to +propagate SRV DNS records for its Pods for the purpose of peer discovery. +The Kubernetes controllers that generate Endpoints and EndpointSlice resources for +Services interpret this to mean that all endpoints are considered “ready” even if the +Pods themselves are not. Agents which consume only Kubernetes generated endpoints +through the Endpoints or EndpointSlice resources can safely assume this behavior. +

+ +
+ +`sessionAffinityConfig`
+ + +Kubernetes core/v1.SessionAffinityConfig + + + +
+ +(Optional) + +

+sessionAffinityConfig contains the configurations of session affinity. +

+ +
+ +`ipFamilies`
+ + +[]Kubernetes core/v1.IPFamily + + + +
+ +(Optional) + +

+IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this +service. This field is usually assigned automatically based on cluster +configuration and the ipFamilyPolicy field. If this field is specified +manually, the requested family is available in the cluster, +and ipFamilyPolicy allows it, it will be used; otherwise creation of +the service will fail. This field is conditionally mutable: it allows +for adding or removing a secondary IP family, but it does not allow +changing the primary IP family of the Service. Valid values are “IPv4” +and “IPv6”. This field only applies to Services of types ClusterIP, +NodePort, and LoadBalancer, and does apply to “headless” services. +This field will be wiped when updating a Service to type ExternalName. +

+ +

+This field may hold a maximum of two entries (dual-stack families, in +either order). These families must correspond to the values of the +clusterIPs field, if specified. Both clusterIPs and ipFamilies are +governed by the ipFamilyPolicy field. +

+ +
+ +`ipFamilyPolicy`
+ + +Kubernetes core/v1.IPFamilyPolicy + + + +
+ +(Optional) + +

+IPFamilyPolicy represents the dual-stack-ness requested or required by +this Service. If there is no value provided, then this field will be set +to SingleStack. Services can be “SingleStack” (a single IP family), +“PreferDualStack” (two IP families on dual-stack configured clusters or +a single IP family on single-stack clusters), or “RequireDualStack” +(two IP families on dual-stack configured clusters, otherwise fail). The +ipFamilies and clusterIPs fields depend on the value of this field. This +field will be wiped when updating a service to type ExternalName. +

+ +
+ +`allocateLoadBalancerNodePorts`
+ +bool + + +
+ +(Optional) + +

+allocateLoadBalancerNodePorts defines if NodePorts will be automatically +allocated for services with type LoadBalancer. Default is “true”. It +may be set to “false” if the cluster load-balancer does not rely on +NodePorts. If the caller requests specific NodePorts (by specifying a +value), those requests will be respected, regardless of this field. +This field may only be set for services with type LoadBalancer and will +be cleared if the type is changed to any other type. +

+ +
+ +`loadBalancerClass`
+ +string + + +
+ +(Optional) + +

+loadBalancerClass is the class of the load balancer implementation this Service belongs to. +If specified, the value of this field must be a label-style identifier, with an optional prefix, +e.g. “internal-vip” or “example.com/internal-vip”. Unprefixed names are reserved for end-users. +This field can only be set when the Service type is ‘LoadBalancer’. If not set, the default load +balancer implementation is used, today this is typically done through the cloud provider integration, +but should apply for any default implementation. If set, it is assumed that a load balancer +implementation is watching for Services with a matching class. Any default load balancer +implementation (e.g. cloud providers) should ignore Services that set this field. +This field can only be set when creating or updating a Service to type ‘LoadBalancer’. +Once set, it can not be changed. This field will be wiped when a service is updated to a non ‘LoadBalancer’ type. +

+ +
+ +`internalTrafficPolicy`
+ + +Kubernetes core/v1.ServiceInternalTrafficPolicy + + + +
+ +(Optional) + +

+InternalTrafficPolicy describes how nodes distribute service traffic they +receive on the ClusterIP. If set to “Local”, the proxy will assume that pods +only want to talk to endpoints of the service on the same node as the pod, +dropping the traffic if there are no local endpoints. The default value, +“Cluster”, uses the standard behavior of routing to all endpoints evenly +(possibly modified by topology and other features). +

+ +
+ +
+ +`roleSelector`
+ +string + + +
+ +(Optional) + +

+Extends the above `serviceSpec.selector` by allowing you to specify defined role as selector for the service. +When `roleSelector` is set, it adds a label selector “kubeblocks.io/role: {roleSelector}” +to the `serviceSpec.selector`. +Example usage: +

+
+
+  roleSelector: "leader"
+
+
+ +

+In this example, setting `roleSelector` to “leader” will add a label selector +“kubeblocks.io/role: leader” to the `serviceSpec.selector`. +This means that the service will select and route traffic to Pods with the label +“kubeblocks.io/role” set to “leader”. +

+ +

+Note that if `podService` is set to true, RoleSelector will be ignored. +The `podService` flag takes precedence over `roleSelector` and generates a service for each Pod. +

+ +
+

+ServiceDescriptorSpec + +

+ +

+ +(Appears on:ServiceDescriptor) + +

+
+ +

+ServiceDescriptorSpec defines the desired state of ServiceDescriptor +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceKind`
+ +string + + +
+ + +

+Describes the type of database service provided by the external service. +For example, “mysql”, “redis”, “mongodb”. +This field categorizes databases by their functionality, protocol and compatibility, facilitating appropriate +service integration based on their unique capabilities. +

+ +

+This field is case-insensitive. +

+ +

+It also supports abbreviations for some well-known databases: +- “pg”, “pgsql”, “postgres”, “postgresql”: PostgreSQL service +- “zk”, “zookeeper”: ZooKeeper service +- “es”, “elasticsearch”: Elasticsearch service +- “mongo”, “mongodb”: MongoDB service +- “ch”, “clickhouse”: ClickHouse service +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+Describes the version of the service provided by the external service. +This is crucial for ensuring compatibility between different components of the system, +as different versions of a service may have varying features. +

+ +
+ +`endpoint`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the endpoint of the external service. +

+ +

+If the service is exposed via a cluster, the endpoint will be provided in the format of `host:port`. +

+ +
+ +`host`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the service or IP address of the external service. +

+ +
+ +`port`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the port of the external service. +

+ +
+ +`podFQDNs`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the pod FQDNs of the external service. +

+ +
+ +`auth`
+ + +ConnectionCredentialAuth + + + +
+ +(Optional) + +

+Specifies the authentication credentials required for accessing an external service. +

+ +
+

+ServiceDescriptorStatus + +

+ +

+ +(Appears on:ServiceDescriptor) + +

+
+ +

+ServiceDescriptorStatus defines the observed state of ServiceDescriptor +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the generation number that has been processed by the controller. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Indicates the current lifecycle phase of the ServiceDescriptor. This can be either ‘Available’ or ‘Unavailable’. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a human-readable explanation detailing the reason for the current phase of the ServiceDescriptor. +

+ +
+

+ServiceRef + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the identifier of the service reference declaration. +It corresponds to the serviceRefDeclaration name defined in either: +

+
    +
  • +`componentDefinition.spec.serviceRefDeclarations[*].name` +
  • +
  • +`clusterDefinition.spec.componentDefs[*].serviceRefDeclarations[*].name` (deprecated) +
  • +
+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the referenced Cluster or the namespace of the referenced ServiceDescriptor object. +If not provided, the referenced Cluster and ServiceDescriptor will be searched in the namespace of the current +Cluster by default. +

+ +
+ +`cluster`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the KubeBlocks Cluster being referenced. +This is used when services from another KubeBlocks Cluster are consumed. +

+ +

+By default, the referenced KubeBlocks Cluster’s `clusterDefinition.spec.connectionCredential` +will be utilized to bind to the current Component. This credential should include: +`endpoint`, `port`, `username`, and `password`. +

+ +

+Note: +

+
    +
  • +The `ServiceKind` and `ServiceVersion` specified in the service reference within the +ClusterDefinition are not validated when using this approach. +
  • +
  • +If both `cluster` and `serviceDescriptor` are present, `cluster` will take precedence. +
  • +
+ +

+Deprecated since v0.9 since `clusterDefinition.spec.connectionCredential` is deprecated, +use `clusterServiceSelector` instead. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`clusterServiceSelector`
+ + +ServiceRefClusterSelector + + + +
+ +(Optional) + +

+References a service provided by another KubeBlocks Cluster. +It specifies the ClusterService and the account credentials needed for access. +

+ +
+ +`serviceDescriptor`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceDescriptor object that describes a service provided by external sources. +

+ +

+When referencing a service provided by external sources, a ServiceDescriptor object is required to establish +the service binding. +The `serviceDescriptor.spec.serviceKind` and `serviceDescriptor.spec.serviceVersion` should match the serviceKind +and serviceVersion declared in the definition. +

+ +

+If both `cluster` and `serviceDescriptor` are specified, the `cluster` takes precedence. +

+ +
+

+ServiceRefClusterSelector + +

+ +

+ +(Appears on:ServiceRef) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cluster`
+ +string + + +
+ + +

+The name of the Cluster being referenced. +

+ +
+ +`service`
+ + +ServiceRefServiceSelector + + + +
+ +(Optional) + +

+Identifies a ClusterService from the list of Services defined in `cluster.spec.services` of the referenced Cluster. +

+ +
+ +`podFQDNs`
+ + +ServiceRefPodFQDNsSelector + + + +
+ +(Optional) + +
+ +`credential`
+ + +ServiceRefCredentialSelector + + + +
+ +(Optional) + +

+Specifies the SystemAccount to authenticate and establish a connection with the referenced Cluster. +The SystemAccount should be defined in `componentDefinition.spec.systemAccounts` +of the Component providing the service in the referenced Cluster. +

+ +
+

+ServiceRefCredentialSelector + +

+ +

+ +(Appears on:ServiceRefClusterSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`component`
+ +string + + +
+ + +

+The name of the Component where the credential resides in. +

+ +
+ +`name`
+ +string + + +
+ + +

+The name of the credential (SystemAccount) to reference. +

+ +
+

+ServiceRefDeclaration + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ServiceRefDeclaration represents a reference to a service that can be either provided by a KubeBlocks Cluster +or an external service. +It acts as a placeholder for the actual service reference, which is determined later when a Cluster is created. +

+ +

+The purpose of ServiceRefDeclaration is to declare a service dependency without specifying the concrete details +of the service. +It allows for flexibility and abstraction in defining service references within a Component. +By using ServiceRefDeclaration, you can define service dependencies in a declarative manner, enabling loose coupling +and easier management of service references across different components and clusters. +

+ +

+Upon Cluster creation, the ServiceRefDeclaration is bound to an actual service through the ServiceRef field, +effectively resolving and connecting to the specified service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the ServiceRefDeclaration. +

+ +
+ +`serviceRefDeclarationSpecs`
+ + +[]ServiceRefDeclarationSpec + + + +
+ + +

+Defines a list of constraints and requirements for services that can be bound to this ServiceRefDeclaration +upon Cluster creation. +Each ServiceRefDeclarationSpec defines a ServiceKind and ServiceVersion, +outlining the acceptable service types and versions that are compatible. +

+ +

+This flexibility allows a ServiceRefDeclaration to be fulfilled by any one of the provided specs. +For example, if it requires an OLTP database, specs for both MySQL and PostgreSQL can be listed, +so that either MySQL or PostgreSQL services can be used when binding. +

+ +
+ +`optional`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the service reference can be optional. +

+ +

+For an optional service-ref, the component can still be created even if the service-ref is not provided. +

+ +
+

+ServiceRefDeclarationSpec + +

+ +

+ +(Appears on:ServiceRefDeclaration) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceKind`
+ +string + + +
+ + +

+Specifies the type or nature of the service. This should be a well-known application cluster type, such as +{mysql, redis, mongodb}. +The field is case-insensitive and supports abbreviations for some well-known databases. +For instance, both `zk` and `zookeeper` are considered as a ZooKeeper cluster, while `pg`, `postgres`, `postgresql` +are all recognized as a PostgreSQL cluster. +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+Defines the service version of the service reference. This is a regular expression that matches a version number pattern. +For instance, `^8.0.8$`, `8.0.\d{1,2}$`, `^[v\-]*?(\d{1,2}\.){0,3}\d{1,2}$` are all valid patterns. +

+ +
+

+ServiceRefPodFQDNsSelector + +

+ +

+ +(Appears on:ServiceRefClusterSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`component`
+ +string + + +
+ + +

+The name of the Component where the pods reside in. +

+ +
+ +`role`
+ +string + + +
+ +(Optional) + +

+The role of the pods to reference. +

+ +
+

+ServiceRefServiceSelector + +

+ +

+ +(Appears on:ServiceRefClusterSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`component`
+ +string + + +
+ +(Optional) + +

+The name of the Component where the Service resides in. +

+ +

+It is required when referencing a Component’s Service. +

+ +
+ +`service`
+ +string + + +
+ + +

+The name of the Service to be referenced. +

+ +

+Leave it empty to reference the default Service. Set it to “headless” to reference the default headless Service. +

+ +

+If the referenced Service is of pod-service type (a Service per Pod), there will be multiple Service objects matched, +and the resolved value will be presented in the following format: service1.name,service2.name… +

+ +
+ +`port`
+ +string + + +
+ +(Optional) + +

+The port name of the Service to be referenced. +

+ +

+If a non-zero node-port exists for the matched Service port, the node-port will be selected first. +

+ +

+If the referenced Service is of pod-service type (a Service per Pod), there will be multiple Service objects matched, +and the resolved value will be presented in the following format: service1.name:port1,service2.name:port2… +

+ +
+

+ServiceRefVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ServiceRefVarSelector selects a var from a ServiceRefDeclaration. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The ServiceRefDeclaration to select from. +

+ +
+ +`ServiceRefVars`
+ + +ServiceRefVars + + + +
+ + +

+ +(Members of `ServiceRefVars` are embedded into this type.) + +

+ +
+

+ServiceRefVars + +

+ +

+ +(Appears on:ServiceRefVarSelector) + +

+
+ +

+ServiceRefVars defines the vars that can be referenced from a ServiceRef. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`endpoint`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`host`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`port`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`podFQDNs`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`CredentialVars`
+ + +CredentialVars + + + +
+ + +

+ +(Members of `CredentialVars` are embedded into this type.) + +

+ +
+

+ServiceVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ServiceVarSelector selects a var from a Service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Service to select from. +It can be referenced from the default headless service by setting the name to “headless”. +

+ +
+ +`ServiceVars`
+ + +ServiceVars + + + +
+ + +

+ +(Members of `ServiceVars` are embedded into this type.) + +

+ +
+

+ServiceVars + +

+ +

+ +(Appears on:ServiceVarSelector) + +

+
+ +

+ServiceVars defines the vars that can be referenced from a Service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceType`
+ + +VarOption + + + +
+ +(Optional) + +

+ServiceType references the type of the service. +

+ +
+ +`host`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`loadBalancer`
+ + +VarOption + + + +
+ +(Optional) + +

+LoadBalancer represents the LoadBalancer ingress point of the service. +

+ +

+If multiple ingress points are available, the first one will be used automatically, choosing between IP and Hostname. +

+ +
+ +`port`
+ + +NamedVar + + + +
+ +(Optional) + +

+Port references a port or node-port defined in the service. +

+ +

+If the referenced service is a pod-service, there will be multiple service objects matched, +and the value will be presented in the following format: service1.name:port1,service2.name:port2… +

+ +
+

+ShardingDefinitionSpec + +

+ +

+ +(Appears on:ShardingDefinition) + +

+
+ +

+ShardingDefinitionSpec defines the desired state of ShardingDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`template`
+ + +ShardingTemplate + + + +
+ + +

+This field is immutable. +

+ +
+ +`shardsLimit`
+ + +ShardsLimit + + + +
+ +(Optional) + +

+Defines the upper limit of the number of shards supported by the sharding. +

+ +

+This field is immutable. +

+ +
+ +`provisionStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the strategy for provisioning shards of the sharding. Only `Serial` and `Parallel` are supported. +

+ +

+This field is immutable. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the strategy for updating shards of the sharding. Only `Serial` and `Parallel` are supported. +

+ +

+This field is immutable. +

+ +
+ +`lifecycleActions`
+ + +ShardingLifecycleActions + + + +
+ +(Optional) + +

+Defines a set of hooks and procedures that customize the behavior of a sharding throughout its lifecycle. +

+ +

+This field is immutable. +

+ +
+ +`systemAccounts`
+ + +[]ShardingSystemAccount + + + +
+ +(Optional) + +

+Defines the system accounts for the sharding. +

+ +

+This field is immutable. +

+ +
+ +`tls`
+ + +ShardingTLS + + + +
+ +(Optional) + +

+Defines the TLS for the sharding. +

+ +

+This field is immutable. +

+ +
+

+ShardingDefinitionStatus + +

+ +

+ +(Appears on:ShardingDefinition) + +

+
+ +

+ShardingDefinitionStatus defines the observed state of ShardingDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Refers to the most recent generation that has been observed for the ShardingDefinition. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Represents the current status of the ShardingDefinition. Valid values include `` (empty), `Available`, and `Unavailable`. +When the status is `Available`, the ShardingDefinition is ready and can be utilized by related objects. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+

+ShardingLifecycleActions + +

+ +

+ +(Appears on:ShardingDefinitionSpec) + +

+
+ +

+ShardingLifecycleActions defines a collection of Actions for customizing the behavior of a sharding. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`postProvision`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the hook to be executed after a sharding’s creation. +

+ +

+By setting `postProvision.preCondition`, you can determine the specific lifecycle stage at which +the action should trigger, available conditions for sharding include: `Immediately`, `ComponentReady`, +and `ClusterReady`. For sharding, the `ComponentReady` condition means all components of the sharding are ready. +

+ +

+With `ComponentReady` being the default. +

+ +

+The PostProvision Action is intended to run only once. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`preTerminate`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the hook to be executed prior to terminating a sharding. +

+ +

+The PreTerminate Action is intended to run only once. +

+ +

+This action is executed immediately when a terminate operation for the sharding is initiated. +The actual termination and cleanup of the sharding and its associated resources will not proceed +until the PreTerminate action has completed successfully. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`shardAdd`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the hook to be executed after a shard is added. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`shardRemove`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the hook to be executed prior to removing a shard. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+

+ShardingSystemAccount + +

+ +

+ +(Appears on:ShardingDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the system account defined in the sharding template. +

+ +

+This field is immutable once set. +

+ +
+ +`shared`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the account is shared across all shards in the sharding. +

+ +
+

+ShardingTLS + +

+ +

+ +(Appears on:ShardingDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`shared`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the TLS configuration is shared across all shards in the sharding. +

+ +
+

+ShardingTemplate + +

+ +

+ +(Appears on:ShardingDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compDef`
+ +string + + +
+ + +

+The component definition(s) that the sharding is based on. +

+ +

+The component definition can be specified using one of the following: +

+
    +
  • +the full name +
  • +
  • +the regular expression pattern (‘^’ will be added to the beginning of the pattern automatically) +
  • +
+ +

+This field is immutable. +

+ +
+

+ShardsLimit + +

+ +

+ +(Appears on:ShardingDefinitionSpec) + +

+
+ +

+ShardsLimit defines the valid range of number of shards supported. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`minShards`
+ +int32 + + +
+ + +

+The minimum limit of shards. +

+ +
+ +`maxShards`
+ +int32 + + +
+ + +

+The maximum limit of shards. +

+ +
+

+Sidecar + +

+ +

+ +(Appears on:ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name specifies the unique name of the sidecar. +

+ +

+The name will be used as the name of the sidecar container in the Pod. +

+ +
+ +`owner`
+ +string + + +
+ + +

+Specifies the exact component definition that the sidecar belongs to. +

+ +

+A sidecar will be updated only when the owner component definition is updated. +

+ +
+ +`sidecarDef`
+ +string + + +
+ + +

+Specifies the sidecar definition CR to be used to create the sidecar. +

+ +
+

+SidecarDefinitionSpec + +

+ +

+ +(Appears on:SidecarDefinition) + +

+
+ +

+SidecarDefinitionSpec defines the desired state of SidecarDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the sidecar. +

+ +
+ +`owner`
+ +string + + +
+ + +

+Specifies the component definition that the sidecar belongs to. +

+ +

+For a specific cluster object, if there are any components provided by the component definition of @owner, +the sidecar will be created and injected into the components which are provided by +the component definition of @selectors automatically. +

+ +

+This field is immutable. +

+ +
+ +`selectors`
+ +[]string + + +
+ + +

+Specifies the component definitions of the components that the sidecar runs along with. +

+ +

+This field is immutable. +

+ +
+ +`containers`
+ + +[]Kubernetes core/v1.Container + + + +
+ + +

+List of containers for the sidecar. +

+ +

+Cannot be updated. +

+ +
+ +`vars`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Defines variables which are needed by the sidecar. +

+ +

+This field is immutable. +

+ +
+ +`configs`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies the configuration file templates used by the Sidecar. +

+ +

+This field is immutable. +

+ +
+ +`scripts`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies the scripts used by the Sidecar. +

+ +

+This field is immutable. +

+ +
+

+SidecarDefinitionStatus + +

+ +

+ +(Appears on:SidecarDefinition) + +

+
+ +

+SidecarDefinitionStatus defines the observed state of SidecarDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Refers to the most recent generation that has been observed for the SidecarDefinition. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Represents the current status of the SidecarDefinition. Valid values include `` (empty), `Available`, and `Unavailable`. +When the status is `Available`, the SidecarDefinition is ready and can be utilized by related objects. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+ +`owners`
+ +string + + +
+ +(Optional) + +

+Resolved owners of the SidecarDefinition. +

+ +
+ +`selectors`
+ +string + + +
+ +(Optional) + +

+Resolved selectors of the SidecarDefinition. +

+ +
+

+SystemAccount + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the unique identifier for the account. This name is used by other entities to reference the account. +

+ +

+This field is immutable once set. +

+ +
+ +`initAccount`
+ +bool + + +
+ +(Optional) + +

+Indicates if this account is a system initialization account (e.g., MySQL root). +

+ +

+This field is immutable once set. +

+ +
+ +`statement`
+ + +SystemAccountStatement + + + +
+ +(Optional) + +

+Defines the statements used to create, delete, and update the account. +

+ +

+This field is immutable once set. +

+ +
+ +`passwordGenerationPolicy`
+ + +PasswordConfig + + + +
+ +(Optional) + +

+Specifies the policy for generating the account’s password. +

+ +

+This field is immutable once set. +

+ +
+

+SystemAccountStatement + +

+ +

+ +(Appears on:SystemAccount) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`create`
+ +string + + +
+ +(Optional) + +

+The statement to create a new account with the necessary privileges. +

+ +

+This field is immutable once set. +

+ +
+ +`delete`
+ +string + + +
+ +(Optional) + +

+The statement to delete an account. +

+ +

+This field is immutable once set. +

+ +
+ +`update`
+ +string + + +
+ +(Optional) + +

+The statement to update an existing account. +

+ +

+This field is immutable once set. +

+ +
+

+TLS + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`volumeName`
+ +string + + +
+ + +

+Specifies the volume name for the TLS secret. +The controller will create a volume object with the specified name and add it to the pod when the TLS is enabled. +

+ +

+This field is immutable once set. +

+ +
+ +`mountPath`
+ +string + + +
+ + +

+Specifies the mount path for the TLS secret to be mounted. +Similar to the volume, the controller will mount the created volume to the specified path within containers when the TLS is enabled. +

+ +

+This field is immutable once set. +

+ +
+ +`defaultMode`
+ +int32 + + +
+ +(Optional) + +

+The permissions for the mounted path. Defaults to 0600. +

+ +

+This field is immutable once set. +

+ +
+ +`caFile`
+ +string + + +
+ +(Optional) + +

+The CA file of the TLS. +

+ +

+This field is immutable once set. +

+ +
+ +`certFile`
+ +string + + +
+ +(Optional) + +

+The certificate file of the TLS. +

+ +

+This field is immutable once set. +

+ +
+ +`keyFile`
+ +string + + +
+ +(Optional) + +

+The key file of the TLS. +

+ +

+This field is immutable once set. +

+ +
+

+TLSConfig + +

+ +

+ +(Appears on:ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enable`
+ +bool + + +
+ +(Optional) + +

+A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) +for secure communication. +When set to true, the Component will be configured to use TLS encryption for its network connections. +This ensures that the data transmitted between the Component and its clients or other Components is encrypted +and protected from unauthorized access. +If TLS is enabled, the Component may require additional configuration, +such as specifying TLS certificates and keys, to properly set up the secure communication channel. +

+ +
+ +`issuer`
+ + +Issuer + + + +
+ +(Optional) + +

+Specifies the configuration for the TLS certificates issuer. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +Required when TLS is enabled. +

+ +
+

+TLSSecretRef + +

+ +

+ +(Appears on:Issuer) + +

+
+ +

+TLSSecretRef defines the Secret that contains TLS certs. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+The namespace where the secret is located. +If not provided, the secret is assumed to be in the same namespace as the Cluster object. +

+ +
+ +`name`
+ +string + + +
+ + +

+Name of the Secret that contains user-provided certificates. +

+ +
+ +`ca`
+ +string + + +
+ + +

+Key of CA cert in Secret +

+ +
+ +`cert`
+ +string + + +
+ + +

+Key of Cert in Secret +

+ +
+ +`key`
+ +string + + +
+ + +

+Key of TLS private key in Secret +

+ +
+

+TLSVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+TLSVarSelector selects a var from the TLS. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Component to select from. +

+ +
+ +`TLSVars`
+ + +TLSVars + + + +
+ + +

+ +(Members of `TLSVars` are embedded into this type.) + +

+ +
+

+TLSVars + +

+ +

+ +(Appears on:TLSVarSelector) + +

+
+ +

+TLSVars defines the vars that can be referenced from the TLS. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enabled`
+ + +VarOption + + + +
+ +(Optional) + +
+

+TargetPodSelector +(`string` alias) +

+ +

+ +(Appears on:ExecAction) + +

+
+ +

+TargetPodSelector defines how to select pod(s) to execute an Action. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"All" +

+
+ +
+ +

+"Any" +

+
+ +
+ +

+"Ordinal" +

+
+ +
+ +

+"Role" +

+
+ +
+

+TerminationPolicyType +(`string` alias) +

+ +

+ +(Appears on:ClusterSpec, ComponentSpec) + +

+
+ +

+TerminationPolicyType defines termination policy types. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Delete" +

+
+ +

+Delete will delete all runtime resources belong to the cluster. +

+ +
+ +

+"DoNotTerminate" +

+
+ +

+DoNotTerminate will block delete operation. +

+ +
+ +

+"WipeOut" +

+
+ +

+WipeOut is based on Delete and wipe out all volume snapshots and snapshot data from backup storage location. +

+ +
+

+UpdateStrategy +(`string` alias) +

+ +

+ +(Appears on:ComponentDefinitionSpec, ShardingDefinitionSpec) + +

+
+ +

+UpdateStrategy defines the update strategy for cluster components. This strategy determines how updates are applied +across the cluster. +The available strategies are `Serial`, `BestEffortParallel`, and `Parallel`. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"BestEffortParallel" +

+
+ +

+BestEffortParallelStrategy indicates that the replicas are updated in parallel, with the operator making +a best-effort attempt to update as many replicas as possible concurrently +while maintaining the component’s availability. +Unlike the `Parallel` strategy, the `BestEffortParallel` strategy aims to ensure that a minimum number +of replicas remain available during the update process to maintain the component’s quorum and functionality. +

+ +

+For example, consider a component with 5 replicas. To maintain the component’s availability and quorum, +the operator may allow a maximum of 2 replicas to be simultaneously updated. This ensures that at least +3 replicas (a quorum) remain available and functional during the update process. +

+ +

+The `BestEffortParallel` strategy strikes a balance between update speed and component availability. +

+ +
+ +

+"Parallel" +

+
+ +

+ParallelStrategy indicates that updates are applied simultaneously to all Pods of a Component. +The replicas are updated in parallel, with the operator updating all replicas concurrently. +This strategy provides the fastest update time but may lead to a period of reduced availability or +capacity during the update process. +

+ +
+ +

+"Serial" +

+
+ +

+SerialStrategy indicates that updates are applied one at a time in a sequential manner. +The operator waits for each replica to be updated and ready before proceeding to the next one. +This ensures that only one replica is unavailable at a time during the update process. +

+ +
+

+VarOption +(`string` alias) +

+ +

+ +(Appears on:ClusterVars, ComponentVars, CredentialVars, NamedVar, ResourceVars, RoledVar, ServiceRefVars, ServiceVars, TLSVars) + +

+
+ +

+VarOption defines whether a variable is required or optional. +

+
+

+VarSource + +

+ +

+ +(Appears on:EnvVar) + +

+
+ +

+VarSource represents a source for the value of an EnvVar. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`configMapKeyRef`
+ + +Kubernetes core/v1.ConfigMapKeySelector + + + +
+ +(Optional) + +

+Selects a key of a ConfigMap. +

+ +
+ +`secretKeyRef`
+ + +Kubernetes core/v1.SecretKeySelector + + + +
+ +(Optional) + +

+Selects a key of a Secret. +

+ +
+ +`hostNetworkVarRef`
+ + +HostNetworkVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of host-network resources. +

+ +
+ +`serviceVarRef`
+ + +ServiceVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Service. +

+ +
+ +`credentialVarRef`
+ + +CredentialVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Credential (SystemAccount). +

+ +
+ +`tlsVarRef`
+ + +TLSVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of the TLS. +

+ +
+ +`serviceRefVarRef`
+ + +ServiceRefVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a ServiceRef. +

+ +
+ +`resourceVarRef`
+ + +ResourceVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a kind of resource. +

+ +
+ +`componentVarRef`
+ + +ComponentVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Component. +

+ +
+ +`clusterVarRef`
+ + +ClusterVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Cluster. +

+ +
+
+

apps.kubeblocks.io/v1alpha1

+
+
+Resource Types: + +

+Cluster + +

+
+ +

+Cluster offers a unified management interface for a wide variety of database and storage systems: +

+
    +
  • +Relational databases: MySQL, PostgreSQL, MariaDB +
  • +
  • +NoSQL databases: Redis, MongoDB +
  • +
  • +KV stores: ZooKeeper, etcd +
  • +
  • +Analytics systems: ElasticSearch, OpenSearch, ClickHouse, Doris, StarRocks, Solr +
  • +
  • +Message queues: Kafka, Pulsar +
  • +
  • +Distributed SQL: TiDB, OceanBase +
  • +
  • +Vector databases: Qdrant, Milvus, Weaviate +
  • +
  • +Object storage: Minio +
  • +
+ +

+KubeBlocks utilizes an abstraction layer to encapsulate the characteristics of these diverse systems. +A Cluster is composed of multiple Components, each defined by vendors or KubeBlocks Addon developers via ComponentDefinition, +arranged in Directed Acyclic Graph (DAG) topologies. +The topologies, defined in a ClusterDefinition, coordinate reconciliation across Cluster’s lifecycle phases: +Creating, Running, Updating, Stopping, Stopped, Deleting. +Lifecycle management ensures that each Component operates in harmony, executing appropriate actions at each lifecycle stage. +

+ +

+For a shared-nothing architecture, the Cluster supports managing multiple shards, +each shard managed by a separate Component, supporting dynamic resharding. +

+ +

+The Cluster object is aimed to maintain the overall integrity and availability of a database cluster, +serves as the central control point, abstracting the complexity of multiple-component management, +and providing a unified interface for cluster-wide operations. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Cluster` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ClusterSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`clusterDefinitionRef`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterDefinition to use when creating a Cluster. +

+ +

+This field enables users to create a Cluster based on a specific ClusterDefinition. +This, in conjunction with the `topology` field, determines: +

+
    +
  • +The Components to be included in the Cluster. +
  • +
  • +The sequences in which the Components are created, updated, and terminated. +
  • +
+ +

+This facilitates multiple-components management with predefined ClusterDefinition. +

+ +

+Users with advanced requirements can bypass this general setting and specify more precise control over +the composition of the Cluster by directly referencing specific ComponentDefinitions for each component +within `componentSpecs[*].componentDef`. +

+ +

+If this field is not provided, each component must be explicitly defined in `componentSpecs[*].componentDef`. +

+ +

+Note: Once set, this field cannot be modified; it is immutable. +

+ +
+ +`clusterVersionRef`
+ +string + + +
+ +(Optional) + +

+Refers to the ClusterVersion name. +

+ +

+Deprecated since v0.9, use ComponentVersion instead. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`topology`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterTopology to be used when creating the Cluster. +

+ +

+This field defines which set of Components, as outlined in the ClusterDefinition, will be used to +construct the Cluster based on the named topology. +The ClusterDefinition may list multiple topologies under `clusterdefinition.spec.topologies[*]`, +each tailored to different use cases or environments. +

+ +

+If `topology` is not specified, the Cluster will use the default topology defined in the ClusterDefinition. +

+ +

+Note: Once set during the Cluster creation, the `topology` field cannot be modified. +It establishes the initial composition and structure of the Cluster and is intended for one-time configuration. +

+ +
+ +`terminationPolicy`
+ + +TerminationPolicyType + + + +
+ + +

+Specifies the behavior when a Cluster is deleted. +It defines how resources, data, and backups associated with a Cluster are managed during termination. +Choose a policy based on the desired level of resource cleanup and data preservation: +

+
    +
  • +`DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. +
  • +
  • +`Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), +allowing for data preservation while stopping other operations. +Warning: Halt policy is deprecated in 0.9.1 and will have same meaning as DoNotTerminate. +
  • +
  • +`Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while +removing all persistent data. +
  • +
  • +`WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and +backups in external storage. +This results in complete data removal and should be used cautiously, primarily in non-production environments +to avoid irreversible data loss. +
  • +
+ +

+Warning: Choosing an inappropriate termination policy can result in data loss. +The `WipeOut` policy is particularly risky in production environments due to its irreversible nature. +

+ +
+ +`shardingSpecs`
+ + +[]ShardingSpec + + + +
+ +(Optional) + +

+Specifies a list of ShardingSpec objects that manage the sharding topology for Cluster Components. +Each ShardingSpec organizes components into shards, with each shard corresponding to a Component. +Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations. +

+ +

+This field supports dynamic resharding by facilitating the addition or removal of shards +through the `shards` field in ShardingSpec. +

+ +

+Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`componentSpecs`
+ + +[]ClusterComponentSpec + + + +
+ +(Optional) + +

+Specifies a list of ClusterComponentSpec objects used to define the individual Components that make up a Cluster. +This field allows for detailed configuration of each Component within the Cluster. +

+ +

+Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`services`
+ + +[]ClusterService + + + +
+ +(Optional) + +

+Defines a list of additional Services that are exposed by a Cluster. +This field allows Services of selected Components, either from `componentSpecs` or `shardingSpecs` to be exposed, +alongside Services defined with ComponentService. +

+ +

+Services defined here can be referenced by other clusters using the ServiceRefClusterSelector. +

+ +
+ +`affinity`
+ + +Affinity + + + +
+ +(Optional) + +

+Defines a set of node affinity scheduling rules for the Cluster’s Pods. +This field helps control the placement of Pods on nodes within the Cluster. +

+ +

+Deprecated since v0.10. Use the `schedulingPolicy` field instead. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+An array that specifies tolerations attached to the Cluster’s Pods, +allowing them to be scheduled onto nodes with matching taints. +

+ +

+Deprecated since v0.10. Use the `schedulingPolicy` field instead. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Cluster. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Specifies runtimeClassName for all Pods managed by this Cluster. +

+ +
+ +`backup`
+ + +ClusterBackup + + + +
+ +(Optional) + +

+Specifies the backup configuration of the Cluster. +

+ +
+ +`tenancy`
+ + +TenancyType + + + +
+ +(Optional) + +

+Describes how Pods are distributed across nodes. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`availabilityPolicy`
+ + +AvailabilityPolicyType + + + +
+ +(Optional) + +

+Describes the availability policy, including zone, node, and none. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the replicas of the first componentSpec, if the replicas of the first componentSpec is specified, +this value will be ignored. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`resources`
+ + +ClusterResources + + + +
+ +(Optional) + +

+Specifies the resources of the first componentSpec, if the resources of the first componentSpec is specified, +this value will be ignored. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`storage`
+ + +ClusterStorage + + + +
+ +(Optional) + +

+Specifies the storage of the first componentSpec, if the storage of the first componentSpec is specified, +this value will be ignored. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`network`
+ + +ClusterNetwork + + + +
+ +(Optional) + +

+The configuration of network. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +
+ +`status`
+ + +ClusterStatus + + + +
+ + +
+

+ClusterDefinition + +

+
+ +

+ClusterDefinition defines the topology for databases or storage systems, +offering a variety of topological configurations to meet diverse deployment needs and scenarios. +

+ +

+It includes a list of Components, each linked to a ComponentDefinition, which enhances reusability and reduces redundancy. +For example, widely used components such as etcd and Zookeeper can be defined once and reused across multiple ClusterDefinitions, +simplifying the setup of new systems. +

+ +

+Additionally, ClusterDefinition also specifies the sequence of startup, upgrade, and shutdown for Components, +ensuring a controlled and predictable management of component lifecycles. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ClusterDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ClusterDefinitionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`type`
+ +string + + +
+ +(Optional) + +

+Specifies the well-known database type, such as mysql, redis, or mongodb. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`componentDefs`
+ + +[]ClusterComponentDefinition + + + +
+ +(Optional) + +

+Provides the definitions for the cluster components. +

+ +

+Deprecated since v0.9. +Components should now be individually defined using ComponentDefinition and +collectively referenced via `topology.components`. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`connectionCredential`
+ +map[string]string + + +
+ +(Optional) + +

+Connection credential template used for creating a connection credential secret for cluster objects. +

+ +

+Built-in objects are: +

+
    +
  • +`$(RANDOM_PASSWD)` random 8 characters. +
  • +
  • +`$(STRONG_RANDOM_PASSWD)` random 16 characters, with mixed cases, digits and symbols. +
  • +
  • +`$(UUID)` generate a random UUID v4 string. +
  • +
  • +`$(UUID_B64)` generate a random UUID v4 BASE64 encoded string. +
  • +
  • +`$(UUID_STR_B64)` generate a random UUID v4 string then BASE64 encoded. +
  • +
  • +`$(UUID_HEX)` generate a random UUID v4 HEX representation. +
  • +
  • +`$(HEADLESS_SVC_FQDN)` headless service FQDN placeholder, value pattern is `$(CLUSTER_NAME)-$(1ST_COMP_NAME)-headless.$(NAMESPACE).svc`, +where 1ST_COMP_NAME is the 1st component that provide `ClusterDefinition.spec.componentDefs[].service` attribute; +
  • +
  • +`$(SVC_FQDN)` service FQDN placeholder, value pattern is `$(CLUSTER_NAME)-$(1ST_COMP_NAME).$(NAMESPACE).svc`, +where 1ST_COMP_NAME is the 1st component that provide `ClusterDefinition.spec.componentDefs[].service` attribute; +
  • +
  • +`$(SVC_PORT_{PORT-NAME})` is ServicePort’s port value with specified port name, i.e, a servicePort JSON struct: +`{"name": "mysql", "targetPort": "mysqlContainerPort", "port": 3306}`, and `$(SVC_PORT_mysql)` in the +connection credential value is 3306. +
  • +
+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`topologies`
+ + +[]ClusterTopology + + + +
+ +(Optional) + +

+Topologies defines all possible topologies within the cluster. +

+ +
+ +
+ +`status`
+ + +ClusterDefinitionStatus + + + +
+ + +
+

+Component + +

+
+ +

+Component is a fundamental building block of a Cluster object. +For example, a Redis Cluster can include Components like ‘redis’, ‘sentinel’, and potentially a proxy like ‘twemproxy’. +

+ +

+The Component object is responsible for managing the lifecycle of all replicas within a Cluster component. +It supports a wide range of operations including provisioning, stopping, restarting, termination, upgrading, +configuration changes, vertical and horizontal scaling, failover, switchover, cross-node migration, +scheduling configuration, exposing Services, managing system accounts, enabling/disabling exporter, +and configuring log collection. +

+ +

+Component is an internal sub-object derived from the user-submitted Cluster object. +It is designed primarily to be used by the KubeBlocks controllers, +users are discouraged from modifying Component objects directly and should use them only for monitoring Component statuses. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Component` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`compDef`
+ +string + + +
+ + +

+Specifies the name of the referenced ComponentDefinition. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +
+ +`serviceRefs`
+ + +[]ServiceRef + + + +
+ +(Optional) + +

+Defines a list of ServiceRef for a Component, enabling access to both external services and +Services provided by other Clusters. +

+ +

+Types of services: +

+
    +
  • +External services: Not managed by KubeBlocks or managed by a different KubeBlocks operator; +Require a ServiceDescriptor for connection details. +
  • +
  • +Services provided by a Cluster: Managed by the same KubeBlocks operator; +identified using Cluster, Component and Service names. +
  • +
+ +

+ServiceRefs with identical `serviceRef.name` in the same Cluster are considered the same. +

+ +

+Example: +

+
+
+serviceRefs:
+  - name: "redis-sentinel"
+    serviceDescriptor:
+      name: "external-redis-sentinel"
+  - name: "postgres-cluster"
+    clusterServiceSelector:
+      cluster: "my-postgres-cluster"
+      service:
+        component: "postgresql"
+
+
+ +

+The example above includes ServiceRefs to an external Redis Sentinel service and a PostgreSQL Cluster. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to add. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resources required by the Component. +It allows defining the CPU, memory requirements and limits for the Component’s containers. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for the Component. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for the Component. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+List of volumes to override. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Overrides Services defined in referenced ComponentDefinition and exposes endpoints that can be accessed +by clients. +

+ +
+ +`systemAccounts`
+ + +[]ComponentSystemAccount + + + +
+ +(Optional) + +

+Overrides system accounts defined in referenced ComponentDefinition. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the desired number of replicas in the Component for enhancing availability and durability, or load balancing. +

+ +
+ +`configs`
+ + +[]ClusterComponentConfig + + + +
+ +(Optional) + +

+Specifies the configuration content of a config template. +

+ +
+ +`enabledLogs`
+ +[]string + + +
+ +(Optional) + +

+Specifies which types of logs should be collected for the Cluster. +The log types are defined in the `componentDefinition.spec.logConfigs` field with the LogConfig entries. +

+ +

+The elements in the `enabledLogs` array correspond to the names of the LogConfig entries. +For example, if the `componentDefinition.spec.logConfigs` defines LogConfig entries with +names “slow_query_log” and “error_log”, +you can enable the collection of these logs by including their names in the `enabledLogs` array: +

+
+
+enabledLogs:
+- slow_query_log
+- error_log
+
+
+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceAccount required by the running Component. +This ServiceAccount is used to grant necessary permissions for the Component’s Pods to interact +with other Kubernetes resources, such as modifying Pod labels or sending events. +

+ +

+Defaults: +If not specified, KubeBlocks automatically assigns a default ServiceAccount named “kb-{cluster.name}”, +bound to a default role defined during KubeBlocks installation. +

+ +

+Future Changes: +Future versions might change the default ServiceAccount creation strategy to one per Component, +potentially revising the naming to “kb-{cluster.name}-{component.name}”. +

+ +

+Users can override the automatic ServiceAccount assignment by explicitly setting the name of +an existing ServiceAccount in this field. +

+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Indicates the InstanceUpdateStrategy that will be +employed to update Pods in the InstanceSet when a revision is made to +Template. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default Concurrency is 100%. +

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +`StrictInPlace` indicates that only in-place upgrades are allowed. +Any attempt to modify other fields will be rejected. +
  • +
  • +`PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod. +If that fails, it will fall back to ReCreate, where the pod will be recreated. +Default value is “PreferInPlace”. +
  • +
+ +
+ +`affinity`
+ + +Affinity + + + +
+ +(Optional) + +

+Specifies a group of affinity scheduling rules for the Component. +It allows users to control how the Component’s Pods are scheduled onto nodes in the Cluster. +

+ +

+Deprecated since v0.10, replaced by the `schedulingPolicy` field. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Allows Pods to be scheduled onto nodes with matching taints. +Each toleration in the array allows the Pod to tolerate node taints based on +specified `key`, `value`, `effect`, and `operator`. +

+
    +
  • +The `key`, `value`, and `effect` identify the taint that the toleration matches. +
  • +
  • +The `operator` determines how the toleration matches the taint. +
  • +
+ +

+Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. +

+ +

+Deprecated since v0.10, replaced by the `schedulingPolicy` field. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`tlsConfig`
+ + +TLSConfig + + + +
+ +(Optional) + +

+Specifies the TLS configuration for the Component, including: +

+
    +
  • +A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) for secure communication. +
  • +
  • +An optional field that specifies the configuration for the TLS certificates issuer when TLS is enabled. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +
  • +
+ +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Allows for the customization of configuration values for each instance within a Component. +An Instance represents a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps). +While instances typically share a common configuration as defined in the ClusterComponentSpec, +they can require unique settings in various scenarios: +

+ +

+For example: +- A database Component might require different resource allocations for primary and secondary instances, + with primaries needing more resources. +- During a rolling upgrade, a Component may first update the image for one or a few instances, +and then update the remaining instances after verifying that the updated instances are functioning correctly. +

+ +

+InstanceTemplate allows for specifying these unique configurations per instance. +Each instance’s name is constructed using the pattern: $(component.name)-$(template.name)-$(ordinal), +starting with an ordinal of 0. +It is crucial to maintain unique names for each InstanceTemplate to avoid conflicts. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the Component. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated Pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the Cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Defines runtimeClassName for all Pods managed by this Component. +

+ +
+ +`disableExporter`
+ +bool + + +
+ +(Optional) + +

+Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will not be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`stop`
+ +bool + + +
+ +(Optional) + +

+Stop the Component. +If set, all the computing resources will be released. +

+ +
+ +
+ +`status`
+ + +ComponentStatus + + + +
+ + +
+

+ComponentDefinition + +

+
+ +

+ComponentDefinition serves as a reusable blueprint for creating Components, +encapsulating essential static settings such as Component description, +Pod templates, configuration file templates, scripts, parameter lists, +injected environment variables and their sources, and event handlers. +ComponentDefinition works in conjunction with dynamic settings from the ClusterComponentSpec, +to instantiate Components during Cluster creation. +

+ +

+Key aspects that can be defined in a ComponentDefinition include: +

+
    +
  • +PodSpec template: Specifies the PodSpec template used by the Component. +
  • +
  • +Configuration templates: Specify the configuration file templates required by the Component. +
  • +
  • +Scripts: Provide the necessary scripts for Component management and operations. +
  • +
  • +Storage volumes: Specify the storage volumes and their configurations for the Component. +
  • +
  • +Pod roles: Outlines various roles of Pods within the Component along with their capabilities. +
  • +
  • +Exposed Kubernetes Services: Specify the Services that need to be exposed by the Component. +
  • +
  • +System accounts: Define the system accounts required for the Component. +
  • +
  • +Monitoring and logging: Configure the exporter and logging settings for the Component. +
  • +
+ +

+ComponentDefinitions also enable defining reactive behaviors of the Component in response to events, +such as member join/leave, Component addition/deletion, role changes, switch over, and more. +This allows for automatic event handling, thus encapsulating complex behaviors within the Component. +

+ +

+Referencing a ComponentDefinition when creating individual Components ensures inheritance of predefined configurations, +promoting reusability and consistency across different deployments and cluster topologies. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ComponentDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentDefinitionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`provider`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Component provider, typically the vendor or developer name. +It identifies the entity responsible for creating and maintaining the Component. +

+ +

+When specifying the provider name, consider the following guidelines: +

+
    +
  • +Keep the name concise and relevant to the Component. +
  • +
  • +Use a consistent naming convention across Components from the same provider. +
  • +
  • +Avoid using trademarked or copyrighted names without proper permission. +
  • +
+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Provides a brief and concise explanation of the Component’s purpose, functionality, and any relevant details. +It serves as a quick reference for users to understand the Component’s role and characteristics. +

+ +
+ +`serviceKind`
+ +string + + +
+ +(Optional) + +

+Defines the type of well-known service protocol that the Component provides. +It specifies the standard or widely recognized protocol used by the Component to offer its Services. +

+ +

+The `serviceKind` field allows users to quickly identify the type of Service provided by the Component +based on common protocols or service types. This information helps in understanding the compatibility, +interoperability, and usage of the Component within a system. +

+ +

+Some examples of well-known service protocols include: +

+
    +
  • +“MySQL”: Indicates that the Component provides a MySQL database service. +
  • +
  • +“PostgreSQL”: Indicates that the Component offers a PostgreSQL database service. +
  • +
  • +“Redis”: Signifies that the Component functions as a Redis key-value store. +
  • +
  • +“ETCD”: Denotes that the Component serves as an ETCD distributed key-value store. +
  • +
+ +

+The `serviceKind` value is case-insensitive, allowing for flexibility in specifying the protocol name. +

+ +

+When specifying the `serviceKind`, consider the following guidelines: +

+
    +
  • +Use well-established and widely recognized protocol names or service types. +
  • +
  • +Ensure that the `serviceKind` accurately represents the primary service type offered by the Component. +
  • +
  • +If the Component provides multiple services, choose the most prominent or commonly used protocol. +
  • +
  • +Limit the `serviceKind` to a maximum of 32 characters for conciseness and readability. +
  • +
+ +

+Note: The `serviceKind` field is optional and can be left empty if the Component does not fit into a well-known +service category or if the protocol is not widely recognized. It is primarily used to convey information about +the Component’s service type to users and facilitate discovery and integration. +

+ +

+The `serviceKind` field is immutable and cannot be updated. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+Specifies the version of the Service provided by the Component. +It follows the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +

+The Semantic Versioning specification defines a version number format of X.Y.Z (MAJOR.MINOR.PATCH), where: +

+
    +
  • +X represents the major version and indicates incompatible API changes. +
  • +
  • +Y represents the minor version and indicates added functionality in a backward-compatible manner. +
  • +
  • +Z represents the patch version and indicates backward-compatible bug fixes. +
  • +
+ +

+Additional labels for pre-release and build metadata are available as extensions to the X.Y.Z format: +

+
    +
  • +Use pre-release labels (e.g., -alpha, -beta) for versions that are not yet stable or ready for production use. +
  • +
  • +Use build metadata (e.g., +build.1) for additional version information if needed. +
  • +
+ +

+Examples of valid ServiceVersion values: +

+
    +
  • +“1.0.0” +
  • +
  • +“2.3.1” +
  • +
  • +“3.0.0-alpha.1” +
  • +
  • +“4.5.2+build.1” +
  • +
+ +

+The `serviceVersion` field is immutable and cannot be updated. +

+ +
+ +`runtime`
+ + +Kubernetes core/v1.PodSpec + + + +
+ + +

+Specifies the PodSpec template used in the Component. +It includes the following elements: +

+
    +
  • +Init containers +
  • +
  • +Containers +
      +
    • +Image +
    • +
    • +Commands +
    • +
    • +Args +
    • +
    • +Envs +
    • +
    • +Mounts +
    • +
    • +Ports +
    • +
    • +Security context +
    • +
    • +Probes +
    • +
    • +Lifecycle +
    • +
    +
  • +
  • +Volumes +
  • +
+ +

+This field is intended to define static settings that remain consistent across all instantiated Components. +Dynamic settings such as CPU and memory resource limits, as well as scheduling settings (affinity, +toleration, priority), may vary among different instantiated Components. +They should be specified in the `cluster.spec.componentSpecs` (ClusterComponentSpec). +

+ +

+Specific instances of a Component may override settings defined here, such as using a different container image +or modifying environment variable values. +These instance-specific overrides can be specified in `cluster.spec.componentSpecs[*].instances`. +

+ +

+This field is immutable and cannot be updated once set. +

+ +
+ +`monitor`
+ + +MonitorConfig + + + +
+ +(Optional) + +

+Deprecated since v0.9. +monitor is the monitoring configuration provided by the provider. +

+ +
+ +`exporter`
+ + +Exporter + + + +
+ +(Optional) + +

+Defines the built-in metrics exporter container. +

+ +
+ +`vars`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Defines variables which are determined after Cluster instantiation and reflect +dynamic or runtime attributes of instantiated Clusters. +These variables serve as placeholders for setting environment variables in Pods and Actions, +or for rendering configuration and script templates before actual values are finalized. +

+ +

+These variables are placed in front of the environment variables declared in the Pod if used as +environment variables. +

+ +

+Variable values can be sourced from: +

+
    +
  • +ConfigMap: Select and extract a value from a specific key within a ConfigMap. +
  • +
  • +Secret: Select and extract a value from a specific key within a Secret. +
  • +
  • +HostNetwork: Retrieves values (including ports) from host-network resources. +
  • +
  • +Service: Retrieves values (including address, port, NodePort) from a selected Service. +Intended to obtain the address of a ComponentService within the same Cluster. +
  • +
  • +Credential: Retrieves account name and password from a SystemAccount variable. +
  • +
  • +ServiceRef: Retrieves address, port, account name and password from a selected ServiceRefDeclaration. +Designed to obtain the address bound to a ServiceRef, such as a ClusterService or +ComponentService of another cluster or an external service. +
  • +
  • +Component: Retrieves values from a selected Component, including replicas and instance name list. +
  • +
+ +

+This field is immutable. +

+ +
+ +`volumes`
+ + +[]ComponentVolume + + + +
+ +(Optional) + +

+Defines the volumes used by the Component and some static attributes of the volumes. +After defining the volumes here, user can reference them in the +`cluster.spec.componentSpecs[*].volumeClaimTemplates` field to configure dynamic properties such as +volume capacity and storage class. +

+ +

+This field allows you to specify the following: +

+
    +
  • +Snapshot behavior: Determines whether a snapshot of the volume should be taken when performing +a snapshot backup of the Component. +
  • +
  • +Disk high watermark: Sets the high watermark for the volume’s disk usage. +When the disk usage reaches the specified threshold, it triggers an alert or action. +
  • +
+ +

+By configuring these volume behaviors, you can control how the volumes are managed and monitored within the Component. +

+ +

+This field is immutable. +

+ +
+ +`hostNetwork`
+ + +HostNetwork + + + +
+ +(Optional) + +

+Specifies the host network configuration for the Component. +

+ +

+When `hostNetwork` option is enabled, the Pods share the host’s network namespace and can directly access +the host’s network interfaces. +This means that if multiple Pods need to use the same port, they cannot run on the same host simultaneously +due to port conflicts. +

+ +

+The DNSPolicy field in the Pod spec determines how containers within the Pod perform DNS resolution. +When using hostNetwork, the operator will set the DNSPolicy to ‘ClusterFirstWithHostNet’. +With this policy, DNS queries will first go through the K8s cluster’s DNS service. +If the query fails, it will fall back to the host’s DNS settings. +

+ +

+If set, the DNS policy will be automatically set to “ClusterFirstWithHostNet”. +

+ +

+This field is immutable. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Defines additional Services to expose the Component’s endpoints. +

+ +

+A default headless Service, named `{cluster.name}-{component.name}-headless`, is automatically created +for internal Cluster communication. +

+ +

+This field enables customization of additional Services to expose the Component’s endpoints to +other Components within the same or different Clusters, and to external applications. +Each Service entry in this list can include properties such as ports, type, and selectors. +

+
    +
  • +For intra-Cluster access, Components can reference Services using variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceVarRef`. +
  • +
  • +For inter-Cluster access, reference Services use variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceRefVarRef`, +and bind Services at Cluster creation time with `clusterComponentSpec.ServiceRef[*].clusterServiceSelector`. +
  • +
+ +

+This field is immutable. +

+ +
+ +`configs`
+ + +[]ComponentConfigSpec + + + +
+ +(Optional) + +

+Specifies the configuration file templates and volume mount parameters used by the Component. +It also includes descriptions of the parameters in the ConfigMaps, such as value range limitations. +

+ +

+This field specifies a list of templates that will be rendered into Component containers’ configuration files. +Each template is represented as a ConfigMap and may contain multiple configuration files, +with each file being a key in the ConfigMap. +

+ +

+The rendered configuration files will be mounted into the Component’s containers +according to the specified volume mount parameters. +

+ +

+This field is immutable. +

+ +
+ +`logConfigs`
+ + +[]LogConfig + + + +
+ +(Optional) + +

+Defines the types of logs generated by instances of the Component and their corresponding file paths. +These logs can be collected for further analysis and monitoring. +

+ +

+The `logConfigs` field is an optional list of LogConfig objects, where each object represents +a specific log type and its configuration. +It allows you to specify multiple log types and their respective file paths for the Component. +

+ +

+Examples: +

+
+
+ logConfigs:
+ - filePathPattern: /data/mysql/log/mysqld-error.log
+   name: error
+ - filePathPattern: /data/mysql/log/mysqld.log
+   name: general
+ - filePathPattern: /data/mysql/log/mysqld-slowquery.log
+   name: slow
+
+
+ +

+This field is immutable. +

+ +
+ +`scripts`
+ + +[]ComponentTemplateSpec + + + +
+ +(Optional) + +

+Specifies groups of scripts, each provided via a ConfigMap, to be mounted as volumes in the container. +These scripts can be executed during container startup or via specific actions. +

+ +

+Each script group is encapsulated in a ComponentTemplateSpec that includes: +

+
    +
  • +The ConfigMap containing the scripts. +
  • +
  • +The mount point where the scripts will be mounted inside the container. +
  • +
+ +

+This field is immutable. +

+ +
+ +`policyRules`
+ + +[]Kubernetes rbac/v1.PolicyRule + + + +
+ +(Optional) + +

+Defines the namespaced policy rules required by the Component. +

+ +

+The `policyRules` field is an array of `rbacv1.PolicyRule` objects that define the policy rules +needed by the Component to operate within a namespace. +These policy rules determine the permissions and verbs the Component is allowed to perform on +Kubernetes resources within the namespace. +

+ +

+The purpose of this field is to automatically generate the necessary RBAC roles +for the Component based on the specified policy rules. +This ensures that the Pods in the Component have appropriate permissions to function. +

+ +

+Note: This field is currently non-functional and is reserved for future implementation. +

+ +

+This field is immutable. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static labels that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If a label key in the `labels` field conflicts with any system labels or user-specified labels, +it will be silently ignored to avoid overriding higher-priority labels. +

+ +

+This field is immutable. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static annotations that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If an annotation key in the `annotations` field conflicts with any system annotations +or user-specified annotations, it will be silently ignored to avoid overriding higher-priority annotations. +

+ +

+This field is immutable. +

+ +
+ +`replicasLimit`
+ + +ReplicasLimit + + + +
+ +(Optional) + +

+Defines the upper limit of the number of replicas supported by the Component. +

+ +

+It defines the maximum number of replicas that can be created for the Component. +This field allows you to set a limit on the scalability of the Component, preventing it from exceeding a certain number of replicas. +

+ +

+This field is immutable. +

+ +
+ +`systemAccounts`
+ + +[]SystemAccount + + + +
+ +(Optional) + +

+An array of `SystemAccount` objects that define the system accounts needed +for the management operations of the Component. +

+ +

+Each `SystemAccount` includes: +

+
    +
  • +Account name. +
  • +
  • +The SQL statement template: Used to create the system account. +
  • +
  • +Password Source: Either generated based on certain rules or retrieved from a Secret. +
  • +
+ +

+Use cases for system accounts typically involve tasks like system initialization, backups, monitoring, +health checks, replication, and other system-level operations. +

+ +

+System accounts are distinct from user accounts, although both are database accounts. +

+
    +
  • +System Accounts: Created during Cluster setup by the KubeBlocks operator, +these accounts have higher privileges for system management and are fully managed +through a declarative API by the operator. +
  • +
+User Accounts: Managed by users or administrators. +User account permissions should follow the principle of least privilege, +granting only the necessary access rights to complete their required tasks. +
  • +
+ +

+This field is immutable. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the concurrency strategy for updating multiple instances of the Component. +Available strategies: +

+
    +
  • +`Serial`: Updates replicas one at a time, ensuring minimal downtime by waiting for each replica to become ready +before updating the next. +
  • +
  • +`Parallel`: Updates all replicas simultaneously, optimizing for speed but potentially reducing availability +during the update. +
  • +
  • +`BestEffortParallel`: Updates replicas concurrently with a limit on simultaneous updates to ensure a minimum +number of operational replicas for maintaining quorum. + For example, in a 5-replica component, updating a maximum of 2 replicas simultaneously keeps +at least 3 operational for quorum. +
  • +
+ +

+This field is immutable and defaults to ‘Serial’. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+InstanceSet controls the creation of pods during initial scale up, replacement of pods on nodes, and scaling down. +

+
    +
  • +`OrderedReady`: Creates pods in increasing order (pod-0, then pod-1, etc). The controller waits until each pod +is ready before continuing. Pods are removed in reverse order when scaling down. +
  • +
  • +`Parallel`: Creates pods in parallel to match the desired scale without waiting. All pods are deleted at once +when scaling down. +
  • +
+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+Enumerate all possible roles assigned to each replica of the Component, influencing its behavior. +

+ +

+A replica can have zero to multiple roles. +KubeBlocks operator determines the roles of each replica by invoking the `lifecycleActions.roleProbe` method. +This action returns a list of roles for each replica, and the returned roles must be predefined in the `roles` field. +

+ +

+The roles assigned to a replica can influence various aspects of the Component’s behavior, such as: +

+
    +
  • +Service selection: The Component’s exposed Services may target replicas based on their roles using `roleSelector`. +
  • +
  • +Update order: The roles can determine the order in which replicas are updated during a Component update. +For instance, replicas with a “follower” role can be updated first, while the replica with the “leader” +role is updated last. This helps minimize the number of leader changes during the update process. +
  • +
+ +

+This field is immutable. +

+ +
+ +`roleArbitrator`
+ + +RoleArbitrator + + + +
+ +(Optional) + +

+This field has been deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +

+This field is immutable. +

+ +
+ +`lifecycleActions`
+ + +ComponentLifecycleActions + + + +
+ +(Optional) + +

+Defines a set of hooks and procedures that customize the behavior of a Component throughout its lifecycle. +Actions are triggered at specific lifecycle stages: +

+
    +
  • +`postProvision`: Defines the hook to be executed after the creation of a Component, +with `preCondition` specifying when the action should be fired relative to the Component’s lifecycle stages: +`Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`. +
  • +
  • +`preTerminate`: Defines the hook to be executed before terminating a Component. +
  • +
  • +`roleProbe`: Defines the procedure which is invoked regularly to assess the role of replicas. +
  • +
  • +`switchover`: Defines the procedure for a controlled transition of leadership from the current leader to a new replica. +This approach aims to minimize downtime and maintain availability in systems with a leader-follower topology, +such as before planned maintenance or upgrades on the current leader node. +
  • +
  • +`memberJoin`: Defines the procedure to add a new replica to the replication group. +
  • +
  • +`memberLeave`: Defines the method to remove a replica from the replication group. +
  • +
  • +`readOnly`: Defines the procedure to switch a replica into the read-only state. +
  • +
+`readWrite`: Defines the procedure to transition a replica from the read-only state back to the read-write state. +
  • +
  • +`dataDump`: Defines the procedure to export the data from a replica. +
  • +
  • +`dataLoad`: Defines the procedure to import data into a replica. +
  • +
+`reconfigure`: Defines the procedure that updates a replica with a new configuration file. +
  • +
  • +`accountProvision`: Defines the procedure to generate a new database account. +
  • +
+ +

+This field is immutable. +

+ +
+ +`serviceRefDeclarations`
+ + +[]ServiceRefDeclaration + + + +
+ +(Optional) + +

+Lists external service dependencies of the Component, including services from other Clusters or outside the K8s environment. +

+ +

+This field is immutable. +

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+`minReadySeconds` is the minimum duration in seconds that a new Pod should remain in the ready +state without any of its containers crashing to be considered available. +This ensures the Pod’s stability and readiness to serve requests. +

+ +

+A default value of 0 seconds means the Pod is considered available as soon as it enters the ready state. +

+ +
+ +
+ +`status`
+ + +ComponentDefinitionStatus + + + +
+ + +
+

+ComponentVersion + +

+
+ +

+ComponentVersion is the Schema for the componentversions API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ComponentVersion` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentVersionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + +
+ +`compatibilityRules`
+ + +[]ComponentVersionCompatibilityRule + + + +
+ + +

+CompatibilityRules defines compatibility rules between sets of component definitions and releases. +

+ +
+ +`releases`
+ + +[]ComponentVersionRelease + + + +
+ + +

+Releases represents different releases of component instances within this ComponentVersion. +

+ +
+ +
+ +`status`
+ + +ComponentVersionStatus + + + +
+ + +
+

+ConfigConstraint + +

+
+ +

+ConfigConstraint manages the parameters across multiple configuration files contained in a single configure template. +These configuration files should have the same format (e.g. ini, xml, properties, json). +

+ +

+It provides the following functionalities: +

+
    +
  1. +Parameter Value Validation: Validates and ensures compliance of parameter values with defined constraints. +
  2. +
  3. +Dynamic Reload on Modification: Monitors parameter changes and triggers dynamic reloads to apply updates. +
  4. +
  5. +Parameter Rendering in Templates: Injects parameters into templates to generate up-to-date configuration files. +
  6. +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ConfigConstraint` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ConfigConstraintSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`reloadOptions`
+ + +ReloadOptions + + + +
+ +(Optional) + +

+Specifies the dynamic reload action supported by the engine. +When set, the controller executes the method defined here to perform hot parameter updates. +

+ +

+Dynamic reloading is triggered only if both of the following conditions are met: +

+
    +
  1. +The modified parameters are listed in the `dynamicParameters` field. +If `reloadStaticParamsBeforeRestart` is set to true, modifications to `staticParameters` +can also trigger a reload. +
  2. +
  3. +`reloadOptions` is set. +
  4. +
+ +

+If `reloadOptions` is not set or the modified parameters are not listed in `dynamicParameters`, +dynamic reloading will not be triggered. +

+ +

+Example: +

+
+
+reloadOptions:
+ tplScriptTrigger:
+   namespace: kb-system
+   scriptConfigMapRef: mysql-reload-script
+   sync: true
+
+
+ +
+ +`dynamicActionCanBeMerged`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to consolidate dynamic reload and restart actions into a single restart. +

+
    +
  • +If true, updates requiring both actions will result in only a restart, merging the actions. +
  • +
  • +If false, updates will trigger both actions executed sequentially: first dynamic reload, then restart. +
  • +
+ +

+This flag allows for more efficient handling of configuration changes by potentially eliminating +an unnecessary reload step. +

+ +
+ +`reloadStaticParamsBeforeRestart`
+ +bool + + +
+ +(Optional) + +

+Configures whether the dynamic reload specified in `reloadOptions` applies only to dynamic parameters or +to all parameters (including static parameters). +

+
    +
  • +false (default): Only modifications to the dynamic parameters listed in `dynamicParameters` +will trigger a dynamic reload. +
  • +
  • +true: Modifications to both dynamic parameters listed in `dynamicParameters` and static parameters +listed in `staticParameters` will trigger a dynamic reload. +The “true” option is for certain engines that require static parameters to be set +via SQL statements before they can take effect on restart. +
  • +
+ +
+ +`toolsImageSpec`
+ + +ToolsSetup + + + +
+ +(Optional) + +

+Specifies the tools container image used by ShellTrigger for dynamic reload. +If the dynamic reload action is triggered by a ShellTrigger, this field is required. +This image must contain all necessary tools for executing the ShellTrigger scripts. +

+ +

+Usually the specified image is referenced by the init container, +which is then responsible for copying the tools from the image to a bin volume. +This ensures that the tools are available to the ‘config-manager’ sidecar. +

+ +
+ +`downwardAPIOptions`
+ + +[]DownwardAPIChangeTriggeredAction + + + +
+ +(Optional) + +

+Specifies a list of actions to execute specified commands based on Pod labels. +

+ +

+It utilizes the K8s Downward API to mount label information as a volume into the pod. +The ‘config-manager’ sidecar container watches for changes in the role label and dynamically invoke +registered commands (usually execute some SQL statements) when a change is detected. +

+ +

+It is designed for scenarios where: +

+
    +
  • +Replicas with different roles have different configurations, such as Redis primary & secondary replicas. +
  • +
  • +After a role switch (e.g., from secondary to primary), some changes in configuration are needed +to reflect the new role. +
  • +
+ +
+ +`scriptConfigs`
+ + +[]ScriptConfig + + + +
+ +(Optional) + +

+A list of ScriptConfig Object. +

+ +

+Each ScriptConfig object specifies a ConfigMap that contains script files that should be mounted inside the pod. +The scripts are mounted as volumes and can be referenced and executed by the dynamic reload +and DownwardAction to perform specific tasks or configurations. +

+ +
+ +`cfgSchemaTopLevelName`
+ +string + + +
+ +(Optional) + +

+Specifies the top-level key in the ‘configurationSchema.cue’ that organizes the validation rules for parameters. +This key must exist within the CUE script defined in ‘configurationSchema.cue’. +

+ +
+ +`configurationSchema`
+ + +CustomParametersValidation + + + +
+ +(Optional) + +

+Defines a list of parameters including their names, default values, descriptions, +types, and constraints (permissible values or the range of valid values). +

+ +
+ +`staticParameters`
+ +[]string + + +
+ +(Optional) + +

+List static parameters. +Modifications to any of these parameters require a restart of the process to take effect. +

+ +
+ +`dynamicParameters`
+ +[]string + + +
+ +(Optional) + +

+List dynamic parameters. +Modifications to these parameters trigger a configuration reload without requiring a process restart. +

+ +
+ +`immutableParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the parameters that cannot be modified once set. +Attempting to change any of these parameters will be ignored. +

+ +
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ +(Optional) + +

+Used to match labels on the pod to determine whether a dynamic reload should be performed. +

+ +

+In some scenarios, only specific pods (e.g., primary replicas) need to undergo a dynamic reload. +The `selector` allows you to specify label selectors to target the desired pods for the reload process. +

+ +

+If the `selector` is not specified or is nil, all pods managed by the workload will be considered for the dynamic +reload. +

+ +
+ +`formatterConfig`
+ + +FileFormatConfig + + + +
+ + +

+Specifies the format of the configuration file and any associated parameters that are specific to the chosen format. +Supported formats include `ini`, `xml`, `yaml`, `json`, `hcl`, `dotenv`, `properties`, and `toml`. +

+ +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +

+Example: +

+
+
+formatterConfig:
+ format: ini
+ iniConfig:
+   sectionName: mysqld
+
+
+ +
+ +
+ +`status`
+ + +ConfigConstraintStatus + + + +
+ + +
+

+Configuration + +

+
+ +

+Configuration represents the complete set of configurations for a specific Component of a Cluster. +This includes templates for each configuration file, their corresponding ConfigConstraints, volume mounts, +and other relevant details. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Configuration` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ConfigurationSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + +
+ +`clusterRef`
+ +string + + +
+ + +

+Specifies the name of the Cluster that this configuration is associated with. +

+ +
+ +`componentName`
+ +string + + +
+ + +

+Represents the name of the Component that this configuration pertains to. +

+ +
+ +`configItemDetails`
+ + +[]ConfigurationItemDetail + + + +
+ +(Optional) + +

+ConfigItemDetails is an array of ConfigurationItemDetail objects. +

+ +

+Each ConfigurationItemDetail corresponds to a configuration template, +which is a ConfigMap that contains multiple configuration files. +Each configuration file is stored as a key-value pair within the ConfigMap. +

+ +

+The ConfigurationItemDetail includes information such as: +

+
    +
  • +The configuration template (a ConfigMap) +
  • +
  • +The corresponding ConfigConstraint (constraints and validation rules for the configuration) +
  • +
  • +Volume mounts (for mounting the configuration files) +
  • +
+ +
+ +
+ +`status`
+ + +ConfigurationStatus + + + +
+ + +
+

+ServiceDescriptor + +

+
+ +

+ServiceDescriptor describes a service provided by external sources. +It contains the necessary details such as the service’s address and connection credentials. +To enable a Cluster to access this service, the ServiceDescriptor’s name should be specified +in the Cluster configuration under `clusterComponent.serviceRefs[*].serviceDescriptor`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ServiceDescriptor` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ServiceDescriptorSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`serviceKind`
+ +string + + +
+ + +

+Describes the type of database service provided by the external service. +For example, “mysql”, “redis”, “mongodb”. +This field categorizes databases by their functionality, protocol and compatibility, facilitating appropriate +service integration based on their unique capabilities. +

+ +

+This field is case-insensitive. +

+ +

+It also supports abbreviations for some well-known databases: +- “pg”, “pgsql”, “postgres”, “postgresql”: PostgreSQL service +- “zk”, “zookeeper”: ZooKeeper service +- “es”, “elasticsearch”: Elasticsearch service +- “mongo”, “mongodb”: MongoDB service +- “ch”, “clickhouse”: ClickHouse service +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+Describes the version of the service provided by the external service. +This is crucial for ensuring compatibility between different components of the system, +as different versions of a service may have varying features. +

+ +
+ +`endpoint`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the endpoint of the external service. +

+ +

+If the service is exposed via a cluster, the endpoint will be provided in the format of `host:port`. +

+ +
+ +`host`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the service or IP address of the external service. +

+ +
+ +`port`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the port of the external service. +

+ +
+ +`auth`
+ + +ConnectionCredentialAuth + + + +
+ +(Optional) + +

+Specifies the authentication credentials required for accessing an external service. +

+ +
+ +
+ +`status`
+ + +ServiceDescriptorStatus + + + +
+ + +
+

+AccessMode +(`string` alias) +

+ +

+ +(Appears on:ConsensusMember) + +

+
+ +

+AccessMode defines the modes of access granted to the SVC. +The modes can be `None`, `Readonly`, or `ReadWrite`. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"None" +

+
+ +

+None implies no access. +

+ +
+ +

+"ReadWrite" +

+
+ +

+ReadWrite permits both read and write operations. +

+ +
+ +

+"Readonly" +

+
+ +

+Readonly allows only read operations. +

+ +
+

+AccountName +(`string` alias) +

+ +

+ +(Appears on:SystemAccountConfig) + +

+
+ +

+AccountName defines system account names. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"kbadmin" +

+
+ +
+ +

+"kbdataprotection" +

+
+ +
+ +

+"kbmonitoring" +

+
+ +
+ +

+"kbprobe" +

+
+ +
+ +

+"kbreplicator" +

+
+ +
+

+Action + +

+ +

+ +(Appears on:ComponentSwitchover, LifecycleActionHandler, Probe) + +

+
+ +

+Action defines a customizable hook or procedure tailored for different database engines, +designed to be invoked at predetermined points within the lifecycle of a Component instance. +It provides a modular and extensible way to customize a Component’s behavior through the execution of defined actions. +

+ +

+Available Action triggers include: +

+
    +
  • +`postProvision`: Defines the hook to be executed after the creation of a Component, +with `preCondition` specifying when the action should be fired relative to the Component’s lifecycle stages: +`Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`. +
  • +
  • +`preTerminate`: Defines the hook to be executed before terminating a Component. +
  • +
  • +`roleProbe`: Defines the procedure which is invoked regularly to assess the role of replicas. +
  • +
  • +`switchover`: Defines the procedure for a controlled transition of leadership from the current leader to a new replica. +This approach aims to minimize downtime and maintain availability in systems with a leader-follower topology, +such as during planned maintenance or upgrades on the current leader node. +
  • +
  • +`memberJoin`: Defines the procedure to add a new replica to the replication group. +
  • +
  • +`memberLeave`: Defines the method to remove a replica from the replication group. +
  • +
  • +`readOnly`: Defines the procedure to switch a replica into the read-only state. +
  • +
  • +`readWrite`: Defines the procedure to transition a replica from the read-only state back to the read-write state. +
  • +
  • +`dataDump`: Defines the procedure to export the data from a replica. +
  • +
  • +`dataLoad`: Defines the procedure to import data into a replica. +
  • +
  • +`reconfigure`: Defines the procedure that updates a replica with new configuration. +
  • +
  • +`accountProvision`: Defines the procedure to generate a new database account. +
  • +
+ +

+Actions can be executed in different ways: +

+
    +
  • +ExecAction: Executes a command inside a container. +which may run as a K8s job or be executed inside the Lorry sidecar container, depending on the implementation. +Future implementations will standardize execution within Lorry. +A set of predefined environment variables are available and can be leveraged within the `exec.command` +to access context information such as details about pods, components, the overall cluster state, +or database connection credentials. +These variables provide a dynamic and context-aware mechanism for script execution. +
  • +
  • +HTTPAction: Performs an HTTP request. +HTTPAction is to be implemented in future version. +
  • +
  • +GRPCAction: In future version, Actions will support initiating gRPC calls. +This allows developers to implement Actions using plugins written in programming language like Go, +providing greater flexibility and extensibility. +
  • +
+ +

+An action is considered successful on returning 0, or HTTP 200 for HTTP(s) Actions. +Any other return value or HTTP status codes indicate failure, +and the action may be retried based on the configured retry policy. +

+
    +
  • +If an action exceeds the specified timeout duration, it will be terminated, and the action is considered failed. +
  • +
  • +If an action produces any data as output, it should be written to stdout, +or included in the HTTP response payload for HTTP(s) actions. +
  • +
  • +If an action encounters any errors, error messages should be written to stderr, +or detailed in the HTTP response with the appropriate non-200 status code. +
  • +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`image`
+ +string + + +
+ +(Optional) + +

+Specifies the container image to be used for running the Action. +

+ +

+When specified, a dedicated container will be created using this image to execute the Action. +This field is mutually exclusive with the `container` field; only one of them should be provided. +

+ +

+This field cannot be updated. +

+ +
+ +`exec`
+ + +ExecAction + + + +
+ +(Optional) + +

+Defines the command to run. +

+ +

+This field cannot be updated. +

+ +
+ +`http`
+ + +HTTPAction + + + +
+ +(Optional) + +

+Specifies the HTTP request to perform. +

+ +

+This field cannot be updated. +

+ +

+Note: HTTPAction is to be implemented in future version. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Represents a list of environment variables that will be injected into the container. +These variables enable the container to adapt its behavior based on the environment it’s running in. +

+ +

+This field cannot be updated. +

+ +
+ +`targetPodSelector`
+ + +TargetPodSelector + + + +
+ +(Optional) + +

+Defines the criteria used to select the target Pod(s) for executing the Action. +This is useful when there is no default target replica identified. +It allows for precise control over which Pod(s) the Action should run in. +

+ +

+This field cannot be updated. +

+ +

+Note: This field is reserved for future use and is not currently active. +

+ +
+ +`matchingKey`
+ +string + + +
+ +(Optional) + +

+Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. +The impact of this field depends on the `targetPodSelector` value: +

+
    +
  • +When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. +
  • +
  • +When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` +will be selected for the Action. +
  • +
+ +

+This field cannot be updated. +

+ +

+Note: This field is reserved for future use and is not currently active. +

+ +
+ +`container`
+ +string + + +
+ +(Optional) + +

+Defines the name of the container within the target Pod where the action will be executed. +

+ +

+This name must correspond to one of the containers defined in `componentDefinition.spec.runtime`. +If this field is not specified, the default behavior is to use the first container listed in +`componentDefinition.spec.runtime`. +

+ +

+This field cannot be updated. +

+ +

+Note: This field is reserved for future use and is not currently active. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum duration in seconds that the Action is allowed to run. +

+ +

+If the Action does not complete within this time frame, it will be terminated. +

+ +

+This field cannot be updated. +

+ +
+ +`retryPolicy`
+ + +RetryPolicy + + + +
+ +(Optional) + +

+Defines the strategy to be taken when retrying the Action after a failure. +

+ +

+It specifies the conditions under which the Action should be retried and the limits to apply, +such as the maximum number of retries and backoff strategy. +

+ +

+This field cannot be updated. +

+ +
+ +`preCondition`
+ + +PreConditionType + + + +
+ +(Optional) + +

+Specifies the state that the cluster must reach before the Action is executed. +Currently, this is only applicable to the `postProvision` action. +

+ +

+The conditions are as follows: +

+
    +
  • +`Immediately`: Executed right after the Component object is created. +The readiness of the Component and its resources is not guaranteed at this stage. +
  • +
  • +`RuntimeReady`: The Action is triggered after the Component object has been created and all associated +runtime resources (e.g. Pods) are in a ready state. +
  • +
  • +`ComponentReady`: The Action is triggered after the Component itself is in a ready state. +This process does not affect the readiness state of the Component or the Cluster. +
  • +
  • +`ClusterReady`: The Action is executed after the Cluster is in a ready state. +This execution does not alter the Component or the Cluster’s state of readiness. +
  • +
+ +

+This field cannot be updated. +

+ +
+

+Affinity + +

+ +

+ +(Appears on:ClusterComponentSpec, ClusterSpec, ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podAntiAffinity`
+ + +PodAntiAffinity + + + +
+ +(Optional) + +

+Specifies the anti-affinity level of Pods within a Component. +It determines how pods should be spread across nodes to improve availability and performance. +It can have the following values: `Preferred` and `Required`. +The default value is `Preferred`. +

+ +
+ +`topologyKeys`
+ +[]string + + +
+ +(Optional) + +

+Represents the key of node labels used to define the topology domain for Pod anti-affinity +and Pod spread constraints. +

+ +

+In K8s, a topology domain is a set of nodes that have the same value for a specific label key. +Nodes with labels containing any of the specified TopologyKeys and identical values are considered +to be in the same topology domain. +

+ +

+Note: The concept of topology in the context of K8s TopologyKeys is different from the concept of +topology in the ClusterDefinition. +

+ +

+When a Pod has anti-affinity or spread constraints specified, Kubernetes will attempt to schedule the +Pod on nodes with different values for the specified TopologyKeys. +This ensures that Pods are spread across different topology domains, promoting high availability and +reducing the impact of node failures. +

+ +

+Some well-known label keys, such as `kubernetes.io/hostname` and `topology.kubernetes.io/zone`, +are often used as TopologyKey. +These keys represent the hostname and zone of a node, respectively. +By including these keys in the TopologyKeys list, Pods will be spread across nodes with +different hostnames or zones. +

+ +

+In addition to the well-known keys, users can also specify custom label keys as TopologyKeys. +This allows for more flexible and custom topology definitions based on the specific needs +of the application or environment. +

+ +

+The TopologyKeys field is a slice of strings, where each string represents a label key. +The order of the keys in the slice does not matter. +

+ +
+ +`nodeLabels`
+ +map[string]string + + +
+ +(Optional) + +

+Indicates the node labels that must be present on nodes for pods to be scheduled on them. +It is a map where the keys are the label keys and the values are the corresponding label values. +Pods will only be scheduled on nodes that have all the specified labels with the corresponding values. +

+ +

+For example, if NodeLabels is set to {“nodeType”: “ssd”, “environment”: “production”}, +pods will only be scheduled on nodes that have both the “nodeType” label with value “ssd” +and the “environment” label with value “production”. +

+ +

+This field allows users to control Pod placement based on specific node labels. +It can be used to ensure that Pods are scheduled on nodes with certain characteristics, +such as specific hardware (e.g., SSD), environment (e.g., production, staging), +or any other custom labels assigned to nodes. +

+ +
+ +`tenancy`
+ + +TenancyType + + + +
+ +(Optional) + +

+Determines the level of resource isolation between Pods. +It can have the following values: `SharedNode` and `DedicatedNode`. +

+
    +
  • +SharedNode: Allow that multiple Pods may share the same node, which is the default behavior of K8s. +
  • +
  • +DedicatedNode: Each Pod runs on a dedicated node, ensuring that no two Pods share the same node. +In other words, if a Pod is already running on a node, no other Pods will be scheduled on that node. +This provides a higher level of isolation and resource guarantee for Pods. +
  • +
+ +

+The default value is `SharedNode`. +

+ +
+

+AvailabilityPolicyType +(`string` alias) +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+AvailabilityPolicyType defines the type of availability policy to be applied for cluster affinity, influencing how +resources are distributed across zones or nodes for high availability and resilience. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"node" +

+
+ +

+AvailabilityPolicyNode specifies that resources should be distributed across different nodes within the same zone. +This policy aims to provide resilience against node failures, ensuring that the failure of a single node does not +impact the overall service availability. +

+ +
+ +

+"none" +

+
+ +

+AvailabilityPolicyNone specifies that no specific availability policy is applied. +Resources may not be explicitly distributed for high availability, potentially concentrating them in a single +zone or node based on other scheduling decisions. +

+ +
+ +

+"zone" +

+
+ +

+AvailabilityPolicyZone specifies that resources should be distributed across different availability zones. +This policy aims to ensure high availability and protect against zone failures, spreading the resources to reduce +the risk of simultaneous downtime. +

+ +
+

+BackupStatusUpdateStage +(`string` alias) +

+
+ +

+BackupStatusUpdateStage defines the stage of backup status update. +

+
+

+BaseBackupType +(`string` alias) +

+
+ +

+BaseBackupType the base backup type, keep synchronized with the BaseBackupType of the data protection API. +

+
+

+BuiltinActionHandlerType +(`string` alias) +

+ +

+ +(Appears on:LifecycleActionHandler) + +

+
+ +

+BuiltinActionHandlerType defines built-in action handlers provided by Lorry, including: +

+
    +
  • +`mysql` +
  • +
  • +`wesql` +
  • +
  • +`oceanbase` +
  • +
  • +`redis` +
  • +
  • +`mongodb` +
  • +
  • +`etcd` +
  • +
  • +`postgresql` +
  • +
  • +`vanilla-postgresql` +
  • +
  • +`apecloud-postgresql` +
  • +
  • +`polardbx` +
  • +
  • +`custom` +
  • +
  • +`unknown` +
  • +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"apecloud-postgresql" +

+
+ +
+ +

+"custom" +

+
+ +
+ +

+"etcd" +

+
+ +
+ +

+"mongodb" +

+
+ +
+ +

+"mysql" +

+
+ +
+ +

+"oceanbase" +

+
+ +
+ +

+"polardbx" +

+
+ +
+ +

+"postgresql" +

+
+ +
+ +

+"redis" +

+
+ +
+ +

+"unknown" +

+
+ +
+ +

+"vanilla-postgresql" +

+
+ +
+ +

+"wesql" +

+
+ +
+

+ClassDefRef + +

+ +

+ +(Appears on:ClusterComponentSpec) + +

+
+ +

+ClassDefRef is deprecated since v0.9. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ComponentClassDefinition. +

+ +
+ +`class`
+ +string + + +
+ + +

+Defines the name of the class that is defined in the ComponentClassDefinition. +

+ +
+

+ClusterBackup + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether automated backup is enabled for the Cluster. +

+ +
+ +`retentionPeriod`
+ +github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1.RetentionPeriod + + +
+ +(Optional) + +

+Determines the duration to retain backups. Backups older than this period are automatically removed. +

+ +

+For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. +Sample duration format: +

+
    +
  • +years: 2y +
  • +
  • +months: 6mo +
  • +
  • +days: 30d +
  • +
  • +hours: 12h +
  • +
  • +minutes: 30m +
  • +
+ +

+You can also combine the above durations. For example: 30d12h30m. +Default value is 7d. +

+ +
+ +`method`
+ +string + + +
+ + +

+Specifies the backup method to use, as defined in backupPolicy. +

+ +
+ +`cronExpression`
+ +string + + +
+ +(Optional) + +

+The cron expression for the schedule. The timezone is in UTC. See https://en.wikipedia.org/wiki/Cron. +

+ +
+ +`startingDeadlineMinutes`
+ +int64 + + +
+ +(Optional) + +

+Specifies the maximum time in minutes that the system will wait to start a missed backup job. +If the scheduled backup time is missed for any reason, the backup job must start within this deadline. +Values must be between 0 (immediate execution) and 1440 (one day). +

+ +
+ +`repoName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the backupRepo. If not set, the default backupRepo will be used. +

+ +
+ +`pitrEnabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to enable point-in-time recovery. +

+ +
+ +`continuousMethod`
+ +string + + +
+ +(Optional) + +

+Specifies the backup method to use, if not set, use the first continuous method. +

+ +
+ +`incrementalBackupEnabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to enable incremental backup. +

+ +
+ +`incrementalCronExpression`
+ +string + + +
+ +(Optional) + +

+The cron expression for the incremental backup schedule. The timezone is in UTC. See https://en.wikipedia.org/wiki/Cron. +

+ +
+

+ClusterComponentConfig + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+ +

+ClusterComponentConfig represents a config with its source bound. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+The name of the config. +

+ +
+ +`ClusterComponentConfigSource`
+ + +ClusterComponentConfigSource + + + +
+ + +

+ +(Members of `ClusterComponentConfigSource` are embedded into this type.) + +

+ +

+The source of the config. +

+ +
+

+ClusterComponentConfigSource + +

+ +

+ +(Appears on:ClusterComponentConfig) + +

+
+ +

+ClusterComponentConfigSource represents the source of a config. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`configMap`
+ + +Kubernetes core/v1.ConfigMapVolumeSource + + + +
+ +(Optional) + +

+ConfigMap source for the config. +

+ +
+

+ClusterComponentDefinition + +

+ +

+ +(Appears on:ClusterDefinitionSpec) + +

+
+ +

+ClusterComponentDefinition defines a Component within a ClusterDefinition but is deprecated and +has been replaced by ComponentDefinition. +

+ +

+Deprecated: Use ComponentDefinition instead. This type is deprecated as of version 0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+This name could be used as default name of `cluster.spec.componentSpecs.name`, and needs to conform with same +validation rules as `cluster.spec.componentSpecs.name`, currently complying with IANA Service Naming rule. +This name will apply to cluster objects as the value of label “apps.kubeblocks.io/component-name”. +

+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Description of the component definition. +

+ +
+ +`workloadType`
+ + +WorkloadType + + + +
+ + +

+Defines the type of the workload. +

+
    +
  • +`Stateless` describes stateless applications. +
  • +
  • +`Stateful` describes common stateful applications. +
  • +
  • +`Consensus` describes applications based on consensus protocols, such as raft and paxos. +
  • +
  • +`Replication` describes applications based on the primary-secondary data replication protocol. +
  • +
+ +
+ +`characterType`
+ +string + + +
+ +(Optional) + +

+Defines well-known database component name, such as mongos(mongodb), proxy(redis), mariadb(mysql). +

+ +
+ +`configSpecs`
+ + +[]ComponentConfigSpec + + + +
+ +(Optional) + +

+Defines the template of configurations. +

+ +
+ +`scriptSpecs`
+ + +[]ComponentTemplateSpec + + + +
+ +(Optional) + +

+Defines the template of scripts. +

+ +
+ +`probes`
+ + +ClusterDefinitionProbes + + + +
+ +(Optional) + +

+Settings for health checks. +

+ +
+ +`logConfigs`
+ + +[]LogConfig + + + +
+ +(Optional) + +

+Specify the logging files which can be observed and configured by cluster users. +

+ +
+ +`podSpec`
+ + +Kubernetes core/v1.PodSpec + + + +
+ +(Optional) + +

+Defines the pod spec template of component. +

+ +
+ +`service`
+ + +ServiceSpec + + + +
+ +(Optional) + +

+Defines the service spec. +

+ +
+ +`statelessSpec`
+ + +StatelessSetSpec + + + +
+ +(Optional) + +

+Defines spec for `Stateless` workloads. +

+ +
+ +`statefulSpec`
+ + +StatefulSetSpec + + + +
+ +(Optional) + +

+Defines spec for `Stateful` workloads. +

+ +
+ +`consensusSpec`
+ + +ConsensusSetSpec + + + +
+ +(Optional) + +

+Defines spec for `Consensus` workloads. It’s required if the workload type is `Consensus`. +

+ +
+ +`replicationSpec`
+ + +ReplicationSetSpec + + + +
+ +(Optional) + +

+Defines spec for `Replication` workloads. +

+ +
+ +`rsmSpec`
+ + +RSMSpec + + + +
+ +(Optional) + +

+Defines workload spec of this component. +From KB 0.7.0, RSM(InstanceSetSpec) will be the underlying CR which powers all kinds of workload in KB. +RSM is an enhanced stateful workload extension dedicated for heavy-state workloads like databases. +

+ +
+ +`horizontalScalePolicy`
+ + +HorizontalScalePolicy + + + +
+ +(Optional) + +

+Defines the behavior of horizontal scale. +

+ +
+ +`systemAccounts`
+ + +SystemAccountSpec + + + +
+ +(Optional) + +

+Defines system accounts needed to manage the component, and the statement to create them. +

+ +
+ +`volumeTypes`
+ + +[]VolumeTypeSpec + + + +
+ +(Optional) + +

+Used to describe the purpose of the volumes mapping the name of the VolumeMounts in the PodSpec.Container field, +such as data volume, log volume, etc. When backing up the volume, the volume can be correctly backed up according +to the volumeType. +

+ +

+For example: +

+
    +
  • +`name: data, type: data` means that the volume named `data` is used to store `data`. +
  • +
  • +`name: binlog, type: log` means that the volume named `binlog` is used to store `log`. +
  • +
+ +

+NOTE: When volumeTypes is not defined, the backup function will not be supported, even if a persistent volume has +been specified. +

+ +
+ +`customLabelSpecs`
+ + +[]CustomLabelSpec + + + +
+ +(Optional) + +

+Used for custom label tags which you want to add to the component resources. +

+ +
+ +`switchoverSpec`
+ + +SwitchoverSpec + + + +
+ +(Optional) + +

+Defines the command to perform a switchover. +In particular, when workloadType=Replication, the command defined in switchoverSpec will only be executed under +the condition of cluster.componentSpecs[x].SwitchPolicy.type=Noop. +

+ +
+ +`postStartSpec`
+ + +PostStartAction + + + +
+ +(Optional) + +

+Defines the command to be executed when the component is ready, and the command will only be executed once after +the component becomes ready. +

+ +
+ +`volumeProtectionSpec`
+ + +VolumeProtectionSpec + + + +
+ +(Optional) + +

+Defines settings for volume protection. +

+ +
+ +`componentDefRef`
+ + +[]ComponentDefRef + + + +
+ +(Optional) + +

+Used to inject values from other components into the current component. Values will be saved and updated in a +configmap and mounted to the current component. +

+ +
+ +`serviceRefDeclarations`
+ + +[]ServiceRefDeclaration + + + +
+ +(Optional) + +

+Used to declare the service reference of the current component. +

+ +
+ +`exporter`
+ + +Exporter + + + +
+ +(Optional) + +

+Defines the metrics exporter. +

+ +
+ +`monitor`
+ + +MonitorConfig + + + +
+ +(Optional) + +

+Deprecated since v0.9 +monitor is monitoring config which provided by provider. +

+ +
+

+ClusterComponentPhase +(`string` alias) +

+ +

+ +(Appears on:ClusterComponentStatus, ComponentStatus) + +

+
+ +

+ClusterComponentPhase defines the phase of a cluster component as represented in cluster.status.components.phase field. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Abnormal" +

+
+ +

+AbnormalClusterCompPhase indicates the component has more than zero replicas, but there are some failed pods. +The component is functioning, but it is in a fragile state. +

+ +
+ +

+"Creating" +

+
+ +

+CreatingClusterCompPhase indicates the component is being created. +

+ +
+ +

+"Deleting" +

+
+ +

+DeletingClusterCompPhase indicates the component is currently being deleted. +

+ +
+ +

+"Failed" +

+
+ +

+FailedClusterCompPhase indicates the component has more than zero replicas, but there are some failed pods. +The component is not functioning. +

+ +
+ +

+"Running" +

+
+ +

+RunningClusterCompPhase indicates the component has more than zero replicas, and all pods are up-to-date and +in a ‘Running’ state. +

+ +
+ +

+"Stopped" +

+
+ +

+StoppedClusterCompPhase indicates the component has zero replicas, and all pods have been deleted. +

+ +
+ +

+"Stopping" +

+
+ +

+StoppingClusterCompPhase indicates the component has zero replicas, and there are pods that are terminating. +

+ +
+ +

+"Updating" +

+
+ +

+UpdatingClusterCompPhase indicates the component has more than zero replicas, and there are no failed pods, +it is currently being updated. +

+ +
+

+ClusterComponentService + +

+ +

+ +(Appears on:ClusterComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+References the ComponentService name defined in the `componentDefinition.spec.services[*].name`. +

+ +
+ +`serviceType`
+ + +Kubernetes core/v1.ServiceType + + + +
+ +(Optional) + +

+Determines how the Service is exposed. Valid options are `ClusterIP`, `NodePort`, and `LoadBalancer`. +

+
    +
  • +`ClusterIP` allocates a Cluster-internal IP address for load-balancing to endpoints. +Endpoints are determined by the selector or if that is not specified, +they are determined by manual construction of an Endpoints object or EndpointSlice objects. +
  • +
  • +`NodePort` builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the ClusterIP. +
  • +
  • +`LoadBalancer` builds on NodePort and creates an external load-balancer (if supported in the current cloud) +which routes to the same endpoints as the ClusterIP. +
  • +
+ +

+Note: although K8s Service type allows the ‘ExternalName’ type, it is not a valid option for ClusterComponentService. +

+ +

+For more info, see: +https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+If ServiceType is LoadBalancer, cloud provider related parameters can be put here. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer. +

+ +
+ +`podService`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to generate individual Services for each Pod. +If set to true, a separate Service will be created for each Pod in the Cluster. +

+ +
+

+ClusterComponentSpec + +

+ +

+ +(Appears on:ClusterSpec, ShardingSpec) + +

+
+ +

+ClusterComponentSpec defines the specification of a Component within a Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+Specifies the Component’s name. +It’s part of the Service DNS name and must comply with the IANA service naming rule. +The name is optional when ClusterComponentSpec is used as a template (e.g., in `shardingSpec`), +but required otherwise. +

+ +
+ +`componentDefRef`
+ +string + + +
+ +(Optional) + +

+References a ClusterComponentDefinition defined in the `clusterDefinition.spec.componentDef` field. +Must comply with the IANA service naming rule. +

+ +

+Deprecated since v0.9, +because defining Components in `clusterDefinition.spec.componentDef` field has been deprecated. +This field is replaced by the `componentDef` field, use `componentDef` instead. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`componentDef`
+ +string + + +
+ +(Optional) + +

+Specifies the exact name, name prefix, or regular expression pattern for matching the name of the ComponentDefinition +custom resource (CR) that defines the Component’s characteristics and behavior. +

+ +

+If both `componentDefRef` and `componentDef` are provided, +the `componentDef` will take precedence over `componentDefRef`. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +If no version is specified, the latest available version will be used. +

+ +
+ +`classDefRef`
+ + +ClassDefRef + + + +
+ +(Optional) + +

+References the class defined in ComponentClassDefinition. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`serviceRefs`
+ + +[]ServiceRef + + + +
+ +(Optional) + +

+Defines a list of ServiceRef for a Component, enabling access to both external services and +Services provided by other Clusters. +

+ +

+Types of services: +

+
    +
  • +External services: Not managed by KubeBlocks or managed by a different KubeBlocks operator; +Require a ServiceDescriptor for connection details. +
  • +
  • +Services provided by a Cluster: Managed by the same KubeBlocks operator; +identified using Cluster, Component and Service names. +
  • +
+ +

+ServiceRefs with identical `serviceRef.name` in the same Cluster are considered the same. +

+ +

+Example: +

+
+
+serviceRefs:
+  - name: "redis-sentinel"
+    serviceDescriptor:
+      name: "external-redis-sentinel"
+  - name: "postgres-cluster"
+    clusterServiceSelector:
+      cluster: "my-postgres-cluster"
+      service:
+        component: "postgresql"
+
+
+ +

+The example above includes ServiceRefs to an external Redis Sentinel service and a PostgreSQL Cluster. +

+ +
+ +`enabledLogs`
+ +[]string + + +
+ +(Optional) + +

+Specifies which types of logs should be collected for the Component. +The log types are defined in the `componentDefinition.spec.logConfigs` field with the LogConfig entries. +

+ +

+The elements in the `enabledLogs` array correspond to the names of the LogConfig entries. +For example, if the `componentDefinition.spec.logConfigs` defines LogConfig entries with +names “slow_query_log” and “error_log”, +you can enable the collection of these logs by including their names in the `enabledLogs` array: +

+
+
+enabledLogs:
+- slow_query_log
+- error_log
+
+
+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to add. +These environment variables will be placed after the environment variables declared in the Pod. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the desired number of replicas in the Component for enhancing availability and durability, or load balancing. +

+ +
+ +`affinity`
+ + +Affinity + + + +
+ +(Optional) + +

+Specifies a group of affinity scheduling rules for the Component. +It allows users to control how the Component’s Pods are scheduled onto nodes in the K8s cluster. +

+ +

+Deprecated since v0.10, replaced by the `schedulingPolicy` field. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Allows Pods to be scheduled onto nodes with matching taints. +Each toleration in the array allows the Pod to tolerate node taints based on +specified `key`, `value`, `effect`, and `operator`. +

+
    +
  • +The `key`, `value`, and `effect` identify the taint that the toleration matches. +
  • +
  • +The `operator` determines how the toleration matches the taint. +
  • +
+ +

+Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. +

+ +

+Deprecated since v0.10, replaced by the `schedulingPolicy` field. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resources required by the Component. +It allows defining the CPU, memory requirements and limits for the Component’s containers. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that represent the storage requirements for the Component. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for the Component. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+List of volumes to override. +

+ +
+ +`services`
+ + +[]ClusterComponentService + + + +
+ +(Optional) + +

+Overrides services defined in referenced ComponentDefinition and expose endpoints that can be accessed by clients. +

+ +
+ +`systemAccounts`
+ + +[]ComponentSystemAccount + + + +
+ +(Optional) + +

+Overrides system accounts defined in referenced ComponentDefinition. +

+ +
+ +`configs`
+ + +[]ClusterComponentConfig + + + +
+ +(Optional) + +

+Specifies the configuration content of a config template. +

+ +
+ +`switchPolicy`
+ + +ClusterSwitchPolicy + + + +
+ +(Optional) + +

+Defines the strategy for switchover and failover. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`tls`
+ +bool + + +
+ +(Optional) + +

+A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) +for secure communication. +When set to true, the Component will be configured to use TLS encryption for its network connections. +This ensures that the data transmitted between the Component and its clients or other Components is encrypted +and protected from unauthorized access. +If TLS is enabled, the Component may require additional configuration, such as specifying TLS certificates and keys, +to properly set up the secure communication channel. +

+ +
+ +`issuer`
+ + +Issuer + + + +
+ +(Optional) + +

+Specifies the configuration for the TLS certificates issuer. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +Required when TLS is enabled. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceAccount required by the running Component. +This ServiceAccount is used to grant necessary permissions for the Component’s Pods to interact +with other Kubernetes resources, such as modifying Pod labels or sending events. +

+ +

+Defaults: +To perform certain operational tasks, agent sidecars running in Pods require specific RBAC permissions. +The service account will be bound to a default role named “kubeblocks-cluster-pod-role” which is installed together with KubeBlocks. +If not specified, KubeBlocks automatically assigns a default ServiceAccount named “kb-{cluster.name}” +

+ +

+Future Changes: +Future versions might change the default ServiceAccount creation strategy to one per Component, +potentially revising the naming to “kb-{cluster.name}-{component.name}”. +

+ +

+Users can override the automatic ServiceAccount assignment by explicitly setting the name of +an existing ServiceAccount in this field. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Defines the update strategy for the Component. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Indicates the InstanceUpdateStrategy that will be +employed to update Pods in the InstanceSet when a revision is made to +Template. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default Concurrency is 100%. +

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +`StrictInPlace` indicates that only allows in-place upgrades. +Any attempt to modify other fields will be rejected. +
  • +
+`PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod. +If that fails, it will fall back to ReCreate, where the Pod will be recreated. +Default value is “PreferInPlace” +
  • +
+ +
+ +`userResourceRefs`
+ + +UserResourceRefs + + + +
+ +(Optional) + +

+Allows users to specify custom ConfigMaps and Secrets to be mounted as volumes +in the Cluster’s Pods. +This is useful in scenarios where users need to provide additional resources to the Cluster, such as: +

+
    +
  • +Mounting custom scripts or configuration files during Cluster startup. +
  • +
  • +Mounting Secrets as volumes to provide sensitive information, like S3 AK/SK, to the Cluster. +
  • +
+ +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Allows for the customization of configuration values for each instance within a Component. +An instance represents a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps). +While instances typically share a common configuration as defined in the ClusterComponentSpec, +they can require unique settings in various scenarios: +

+ +

+For example: +- A database Component might require different resource allocations for primary and secondary instances, + with primaries needing more resources. +- During a rolling upgrade, a Component may first update the image for one or a few instances, +and then update the remaining instances after verifying that the updated instances are functioning correctly. +

+ +

+InstanceTemplate allows for specifying these unique configurations per instance. +Each instance’s name is constructed using the pattern: $(component.name)-$(template.name)-$(ordinal), +starting with an ordinal of 0. +It is crucial to maintain unique names for each InstanceTemplate to avoid conflicts. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of replicas specified for the Component. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated Pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the Cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`disableExporter`
+ +bool + + +
+ +(Optional) + +

+Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will not be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`monitor`
+ +bool + + +
+ +(Optional) + +

+Deprecated since v0.9 +Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`stop`
+ +bool + + +
+ +(Optional) + +

+Stop the Component. +If set, all the computing resources will be released. +

+ +
+

+ClusterComponentStatus + +

+ +

+ +(Appears on:ClusterStatus) + +

+
+ +

+ClusterComponentStatus records Component status. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +ClusterComponentPhase + + + +
+ + +

+Specifies the current state of the Component. +

+ +
+ +`message`
+ + +ComponentMessageMap + + + +
+ +(Optional) + +

+Records detailed information about the Component in its current phase. +The keys are either podName, deployName, or statefulSetName, formatted as ‘ObjectKind/Name’. +

+ +
+ +`podsReady`
+ +bool + + +
+ +(Optional) + +

+Checks if all Pods of the Component are ready. +

+ +
+ +`podsReadyTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Indicates the time when all Component Pods became ready. +This is the readiness time of the last Component Pod. +

+ +
+ +`membersStatus`
+ + +[]MemberStatus + + + +
+ +(Optional) + +

+Represents the status of the members. +

+ +
+

+ClusterComponentVolumeClaimTemplate + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec, InstanceTemplate) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Refers to the name of a volumeMount defined in either: +

+
    +
  • +`componentDefinition.spec.runtime.containers[*].volumeMounts` +
  • +
  • +`clusterDefinition.spec.componentDefs[*].podSpec.containers[*].volumeMounts` (deprecated) +
  • +
+ +

+The value of `name` must match the `name` field of a volumeMount specified in the corresponding `volumeMounts` array. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies the labels for the PVC of the volume. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies the annotations for the PVC of the volume. +

+ +
+ +`spec`
+ + +PersistentVolumeClaimSpec + + + +
+ +(Optional) + +

+Defines the desired characteristics of a PersistentVolumeClaim that will be created for the volume +with the mount name specified in the `name` field. +

+ +

+When a Pod is created for this ClusterComponent, a new PVC will be created based on the specification +defined in the `spec` field. The PVC will be associated with the volume mount specified by the `name` field. +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`accessModes`
+ + +[]Kubernetes core/v1.PersistentVolumeAccessMode + + + +
+ +(Optional) + +

+Contains the desired access modes the volume should have. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.VolumeResourceRequirements + + + +
+ +(Optional) + +

+Represents the minimum resources the volume should have. +If the RecoverVolumeExpansionFailure feature is enabled, users are allowed to specify resource requirements that +are lower than the previous value but must still be higher than the capacity recorded in the status field of the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources. +

+ +
+ +`storageClassName`
+ +string + + +
+ +(Optional) + +

+The name of the StorageClass required by the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. +

+ +
+ +`volumeMode`
+ + +Kubernetes core/v1.PersistentVolumeMode + + + +
+ +(Optional) + +

+Defines what type of volume is required by the claim, either Block or Filesystem. +

+ +
+ +
+

+ClusterDefinitionProbe + +

+ +

+ +(Appears on:ClusterDefinitionProbes) + +

+
+ +

+ClusterDefinitionProbe is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`periodSeconds`
+ +int32 + + +
+ + +

+How often (in seconds) to perform the probe. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ + +

+Number of seconds after which the probe times out. Defaults to 1 second. +

+ +
+ +`failureThreshold`
+ +int32 + + +
+ + +

+Minimum consecutive failures for the probe to be considered failed after having succeeded. +

+ +
+ +`commands`
+ + +ClusterDefinitionProbeCMDs + + + +
+ +(Optional) + +

+Commands used to execute for probe. +

+ +
+

+ClusterDefinitionProbeCMDs + +

+ +

+ +(Appears on:ClusterDefinitionProbe) + +

+
+ +

+ClusterDefinitionProbeCMDs is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`writes`
+ +[]string + + +
+ +(Optional) + +

+Defines write checks that are executed on the probe sidecar. +

+ +
+ +`queries`
+ +[]string + + +
+ +(Optional) + +

+Defines read checks that are executed on the probe sidecar. +

+ +
+

+ClusterDefinitionProbes + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+ClusterDefinitionProbes is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`runningProbe`
+ + +ClusterDefinitionProbe + + + +
+ +(Optional) + +

+Specifies the probe used for checking the running status of the component. +

+ +
+ +`statusProbe`
+ + +ClusterDefinitionProbe + + + +
+ +(Optional) + +

+Specifies the probe used for checking the status of the component. +

+ +
+ +`roleProbe`
+ + +ClusterDefinitionProbe + + + +
+ +(Optional) + +

+Specifies the probe used for checking the role of the component. +

+ +
+ +`roleProbeTimeoutAfterPodsReady`
+ +int32 + + +
+ +(Optional) + +

+Defines the timeout (in seconds) for the role probe after all pods of the component are ready. +The system will check if the application is available in the pod. +If pods exceed the InitializationTimeoutSeconds time without a role label, this component will enter the +Failed/Abnormal phase. +

+ +

+Note that this configuration will only take effect if the component supports RoleProbe +and will not affect the life cycle of the pod. The default value is 60 seconds. +

+ +
+

+ClusterDefinitionSpec + +

+ +

+ +(Appears on:ClusterDefinition) + +

+
+ +

+ClusterDefinitionSpec defines the desired state of ClusterDefinition. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`type`
+ +string + + +
+ +(Optional) + +

+Specifies the well-known database type, such as mysql, redis, or mongodb. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`componentDefs`
+ + +[]ClusterComponentDefinition + + + +
+ +(Optional) + +

+Provides the definitions for the cluster components. +

+ +

+Deprecated since v0.9. +Components should now be individually defined using ComponentDefinition and +collectively referenced via `topology.components`. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`connectionCredential`
+ +map[string]string + + +
+ +(Optional) + +

+Connection credential template used for creating a connection credential secret for cluster objects. +

+ +

+Built-in objects are: +

+
    +
  • +`$(RANDOM_PASSWD)` random 8 characters. +
  • +
  • +`$(STRONG_RANDOM_PASSWD)` random 16 characters, with mixed cases, digits and symbols. +
  • +
  • +`$(UUID)` generate a random UUID v4 string. +
  • +
  • +`$(UUID_B64)` generate a random UUID v4 BASE64 encoded string. +
  • +
  • +`$(UUID_STR_B64)` generate a random UUID v4 string then BASE64 encoded. +
  • +
  • +`$(UUID_HEX)` generate a random UUID v4 HEX representation. +
  • +
  • +`$(HEADLESS_SVC_FQDN)` headless service FQDN placeholder, value pattern is `$(CLUSTER_NAME)-$(1ST_COMP_NAME)-headless.$(NAMESPACE).svc`, +where 1ST_COMP_NAME is the 1st component that provide `ClusterDefinition.spec.componentDefs[].service` attribute; +
  • +
  • +`$(SVC_FQDN)` service FQDN placeholder, value pattern is `$(CLUSTER_NAME)-$(1ST_COMP_NAME).$(NAMESPACE).svc`, +where 1ST_COMP_NAME is the 1st component that provide `ClusterDefinition.spec.componentDefs[].service` attribute; +
  • +
  • +`$(SVC_PORT_{PORT-NAME})` is ServicePort’s port value with specified port name, i.e, a servicePort JSON struct: +`{"name": "mysql", "targetPort": "mysqlContainerPort", "port": 3306}`, and `$(SVC_PORT_mysql)` in the +connection credential value is 3306. +
  • +
+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`topologies`
+ + +[]ClusterTopology + + + +
+ +(Optional) + +

+Topologies defines all possible topologies within the cluster. +

+ +
+

+ClusterDefinitionStatus + +

+ +

+ +(Appears on:ClusterDefinition) + +

+
+ +

+ClusterDefinitionStatus defines the observed state of ClusterDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the most recent generation observed for this ClusterDefinition. +

+ +
+ +`phase`
+ + +Phase + + + +
+ + +

+Specifies the current phase of the ClusterDefinition. Valid values are `empty`, `Available`, `Unavailable`. +When `Available`, the ClusterDefinition is ready and can be referenced by related objects. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+ +`topologies`
+ +string + + +
+ +(Optional) + +

+Topologies this ClusterDefinition supported. +

+ +
+ +`serviceRefs`
+ +string + + +
+ +(Optional) + +

+The service references declared by this ClusterDefinition. +

+ +
+

+ClusterNetwork + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+ClusterNetwork is deprecated since v0.9. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`hostNetworkAccessible`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the host network can be accessed. By default, this is set to false. +

+ +
+ +`publiclyAccessible`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the network is accessible to the public. By default, this is set to false. +

+ +
+

+ClusterObjectReference + +

+ +

+ +(Appears on:ComponentVarSelector, CredentialVarSelector, HostNetworkVarSelector, ServiceRefVarSelector, ServiceVarSelector) + +

+
+ +

+ClusterObjectReference defines information to let you locate the referenced object inside the same Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compDef`
+ +string + + +
+ +(Optional) + +

+Specifies the exact name, name prefix, or regular expression pattern for matching the name of the ComponentDefinition +custom resource (CR) used by the component that the referent object resides in. +

+ +

+If not specified, the component itself will be used. +

+ +
+ +`name`
+ +string + + +
+ +(Optional) + +

+Name of the referent object. +

+ +
+ +`optional`
+ +bool + + +
+ +(Optional) + +

+Specify whether the object must be defined. +

+ +
+ +`multipleClusterObjectOption`
+ + +MultipleClusterObjectOption + + + +
+ +(Optional) + +

+This option defines the behavior when multiple component objects match the specified @CompDef. +If not provided, an error will be raised when handling multiple matches. +

+ +
+

+ClusterPhase +(`string` alias) +

+ +

+ +(Appears on:ClusterStatus) + +

+
+ +

+ClusterPhase defines the phase of the Cluster within the .status.phase field. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Abnormal" +

+
+ +

+AbnormalClusterPhase represents some components are in `Failed` or `Abnormal` phase, indicates that the cluster +is in a fragile state and troubleshooting is required. +

+ +
+ +

+"Creating" +

+
+ +

+CreatingClusterPhase represents all components are in `Creating` phase. +

+ +
+ +

+"Deleting" +

+
+ +

+DeletingClusterPhase indicates the cluster is being deleted. +

+ +
+ +

+"Failed" +

+
+ +

+FailedClusterPhase represents all components are in `Failed` phase, indicates that the cluster is unavailable. +

+ +
+ +

+"Running" +

+
+ +

+RunningClusterPhase represents all components are in `Running` phase, indicates that the cluster is functioning properly. +

+ +
+ +

+"Stopped" +

+
+ +

+StoppedClusterPhase represents all components are in `Stopped` phase, indicates that the cluster has stopped and +is not providing any functionality. +

+ +
+ +

+"Stopping" +

+
+ +

+StoppingClusterPhase represents at least one component is in `Stopping` phase, indicates that the cluster is in +the process of stopping. +

+ +
+ +

+"Updating" +

+
+ +

+UpdatingClusterPhase represents all components are in `Creating`, `Running` or `Updating` phase, and at least one +component is in `Creating` or `Updating` phase, indicates that the cluster is undergoing an update. +

+ +
+

+ClusterResources + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+ClusterResources is deprecated since v0.9. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cpu`
+ + +Kubernetes resource.Quantity + + + +
+ +(Optional) + +

+Specifies the amount of CPU resource the Cluster needs. +For more information, refer to: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +

+ +
+ +`memory`
+ + +Kubernetes resource.Quantity + + + +
+ +(Optional) + +

+Specifies the amount of memory resource the Cluster needs. +For more information, refer to: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +

+ +
+

+ClusterService + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+ClusterService defines a service that is exposed externally, allowing entities outside the cluster to access it. +For example, external applications, or other Clusters. +And another Cluster managed by the same KubeBlocks operator can resolve the address exposed by a ClusterService +using the `serviceRef` field. +

+ +

+When a Component needs to access another Cluster’s ClusterService using the `serviceRef` field, +it must also define the service type and version information in the `componentDefinition.spec.serviceRefDeclarations` +section. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`Service`
+ + +Service + + + +
+ + +

+ +(Members of `Service` are embedded into this type.) + +

+ +
+ +`shardingSelector`
+ +string + + +
+ +(Optional) + +

+Extends the ServiceSpec.Selector by allowing the specification of a sharding name, which is defined in +`cluster.spec.shardingSpecs[*].name`, to be used as a selector for the service. +Note that this and the `componentSelector` are mutually exclusive and cannot be set simultaneously. +

+ +
+ +`componentSelector`
+ +string + + +
+ +(Optional) + +

+Extends the ServiceSpec.Selector by allowing the specification of a component, to be used as a selector for the service. +Note that this and the `shardingSelector` are mutually exclusive and cannot be set simultaneously. +

+ +
+

+ClusterSpec + +

+ +

+ +(Appears on:Cluster) + +

+
+ +

+ClusterSpec defines the desired state of Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterDefinitionRef`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterDefinition to use when creating a Cluster. +

+ +

+This field enables users to create a Cluster based on a specific ClusterDefinition. +Which, in conjunction with the `topology` field, determine: +

+
    +
  • +The Components to be included in the Cluster. +
  • +
+The sequences in which the Components are created, updated, and terminated. +
  • +
+ +

+This facilitates multiple-components management with predefined ClusterDefinition. +

+ +

+Users with advanced requirements can bypass this general setting and specify more precise control over +the composition of the Cluster by directly referencing specific ComponentDefinitions for each component +within `componentSpecs[*].componentDef`. +

+ +

+If this field is not provided, each component must be explicitly defined in `componentSpecs[*].componentDef`. +

+ +

+Note: Once set, this field cannot be modified; it is immutable. +

+ +
+ +`clusterVersionRef`
+ +string + + +
+ +(Optional) + +

+Refers to the ClusterVersion name. +

+ +

+Deprecated since v0.9, use ComponentVersion instead. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`topology`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterTopology to be used when creating the Cluster. +

+ +

+This field defines which set of Components, as outlined in the ClusterDefinition, will be used to +construct the Cluster based on the named topology. +The ClusterDefinition may list multiple topologies under `clusterdefinition.spec.topologies[*]`, +each tailored to different use cases or environments. +

+ +

+If `topology` is not specified, the Cluster will use the default topology defined in the ClusterDefinition. +

+ +

+Note: Once set during the Cluster creation, the `topology` field cannot be modified. +It establishes the initial composition and structure of the Cluster and is intended for one-time configuration. +

+ +
+ +`terminationPolicy`
+ + +TerminationPolicyType + + + +
+ + +

+Specifies the behavior when a Cluster is deleted. +It defines how resources, data, and backups associated with a Cluster are managed during termination. +Choose a policy based on the desired level of resource cleanup and data preservation: +

+
    +
  • +`DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. +
  • +
  • +`Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), +allowing for data preservation while stopping other operations. +Warning: Halt policy is deprecated in 0.9.1 and will have same meaning as DoNotTerminate. +
  • +
  • +`Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while +removing all persistent data. +
  • +
  • +`WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and +backups in external storage. +This results in complete data removal and should be used cautiously, primarily in non-production environments +to avoid irreversible data loss. +
  • +
+ +

+Warning: Choosing an inappropriate termination policy can result in data loss. +The `WipeOut` policy is particularly risky in production environments due to its irreversible nature. +

+ +
+ +`shardingSpecs`
+ + +[]ShardingSpec + + + +
+ +(Optional) + +

+Specifies a list of ShardingSpec objects that manage the sharding topology for Cluster Components. +Each ShardingSpec organizes components into shards, with each shard corresponding to a Component. +Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations. +

+ +

+This field supports dynamic resharding by facilitating the addition or removal of shards +through the `shards` field in ShardingSpec. +

+ +

+Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`componentSpecs`
+ + +[]ClusterComponentSpec + + + +
+ +(Optional) + +

+Specifies a list of ClusterComponentSpec objects used to define the individual Components that make up a Cluster. +This field allows for detailed configuration of each Component within the Cluster. +

+ +

+Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`services`
+ + +[]ClusterService + + + +
+ +(Optional) + +

+Defines a list of additional Services that are exposed by a Cluster. +This field allows Services of selected Components, either from `componentSpecs` or `shardingSpecs` to be exposed, +alongside Services defined with ComponentService. +

+ +

+Services defined here can be referenced by other clusters using the ServiceRefClusterSelector. +

+ +
+ +`affinity`
+ + +Affinity + + + +
+ +(Optional) + +

+Defines a set of node affinity scheduling rules for the Cluster’s Pods. +This field helps control the placement of Pods on nodes within the Cluster. +

+ +

+Deprecated since v0.10. Use the `schedulingPolicy` field instead. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+An array that specifies tolerations attached to the Cluster’s Pods, +allowing them to be scheduled onto nodes with matching taints. +

+ +

+Deprecated since v0.10. Use the `schedulingPolicy` field instead. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Cluster. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Specifies runtimeClassName for all Pods managed by this Cluster. +

+ +
+ +`backup`
+ + +ClusterBackup + + + +
+ +(Optional) + +

+Specifies the backup configuration of the Cluster. +

+ +
+ +`tenancy`
+ + +TenancyType + + + +
+ +(Optional) + +

+Describes how Pods are distributed across nodes. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`availabilityPolicy`
+ + +AvailabilityPolicyType + + + +
+ +(Optional) + +

+Describes the availability policy, including zone, node, and none. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the replicas of the first componentSpec. If the replicas of the first componentSpec are specified, +this value will be ignored. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`resources`
+ + +ClusterResources + + + +
+ +(Optional) + +

+Specifies the resources of the first componentSpec. If the resources of the first componentSpec are specified, +this value will be ignored. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`storage`
+ + +ClusterStorage + + + +
+ +(Optional) + +

+Specifies the storage of the first componentSpec. If the storage of the first componentSpec is specified, +this value will be ignored. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`network`
+ + +ClusterNetwork + + + +
+ +(Optional) + +

+The configuration of network. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+

+ClusterStatus + +

+ +

+ +(Appears on:Cluster) + +

+
+ +

+ClusterStatus defines the observed state of the Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+The most recent generation number of the Cluster object that has been observed by the controller. +

+ +
+ +`phase`
+ + +ClusterPhase + + + +
+ +(Optional) + +

+The current phase of the Cluster includes: +`Creating`, `Running`, `Updating`, `Stopping`, `Stopped`, `Deleting`, `Failed`, `Abnormal`. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+ +`components`
+ + +map[string]github.com/apecloud/kubeblocks/apis/apps/v1alpha1.ClusterComponentStatus + + + +
+ +(Optional) + +

+Records the current status information of all Components within the Cluster. +

+ +
+ +`clusterDefGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the generation number of the referenced ClusterDefinition. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents a list of detailed status of the Cluster object. +Each condition in the list provides real-time information about certain aspect of the Cluster object. +

+ +

+This field is crucial for administrators and developers to monitor and respond to changes within the Cluster. +It provides a history of state transitions and a snapshot of the current state that can be used for +automated logic or direct inspection. +

+ +
+

+ClusterStorage + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+ClusterStorage is deprecated since v0.9. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`size`
+ + +Kubernetes resource.Quantity + + + +
+ +(Optional) + +

+Specifies the amount of storage the Cluster needs. +For more information, refer to: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +

+ +
+

+ClusterSwitchPolicy + +

+ +

+ +(Appears on:ClusterComponentSpec) + +

+
+ +

+ClusterSwitchPolicy defines the switch policy for a Cluster. +

+ +

+Deprecated since v0.9. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`type`
+ + +SwitchPolicyType + + + +
+ +(Optional) + +

+Type specifies the type of switch policy to be applied. +

+ +
+

+ClusterTopology + +

+ +

+ +(Appears on:ClusterDefinitionSpec) + +

+
+ +

+ClusterTopology represents the definition for a specific cluster topology. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name is the unique identifier for the cluster topology. +Cannot be updated. +

+ +
+ +`components`
+ + +[]ClusterTopologyComponent + + + +
+ + +

+Components specifies the components in the topology. +

+ +
+ +`orders`
+ + +ClusterTopologyOrders + + + +
+ +(Optional) + +

+Specifies the sequence in which components within a cluster topology are +started, stopped, and upgraded. +This ordering is crucial for maintaining the correct dependencies and operational flow across components. +

+ +
+ +`default`
+ +bool + + +
+ +(Optional) + +

+Default indicates whether this topology serves as the default configuration. +When set to true, this topology is automatically used unless another is explicitly specified. +

+ +
+

+ClusterTopologyComponent + +

+ +

+ +(Appears on:ClusterTopology) + +

+
+ +

+ClusterTopologyComponent defines a Component within a ClusterTopology. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the unique identifier of the component within the cluster topology. +It follows IANA Service naming rules and is used as part of the Service’s DNS name. +The name must start with a lowercase letter, can contain lowercase letters, numbers, +and hyphens, and must end with a lowercase letter or number. +

+ +

+Cannot be updated once set. +

+ +
+ +`compDef`
+ +string + + +
+ + +

+Specifies the exact name, name prefix, or regular expression pattern for matching the name of the ComponentDefinition +custom resource (CR) that defines the Component’s characteristics and behavior. +

+ +

+The system selects the ComponentDefinition CR with the latest version that matches the pattern. +This approach allows: +

+
    +
  1. +Precise selection by providing the exact name of a ComponentDefinition CR. +
  2. +
  3. +Flexible and automatic selection of the most up-to-date ComponentDefinition CR +by specifying a name prefix or regular expression pattern. +
  4. +
+ +

+Once set, this field cannot be updated. +

+ +
+

+ClusterTopologyOrders + +

+ +

+ +(Appears on:ClusterTopology) + +

+
+ +

+ClusterTopologyOrders manages the lifecycle of components within a cluster by defining their provisioning, +terminating, and updating sequences. +It organizes components into stages or groups, where each group indicates a set of components +that can be managed concurrently. +These groups are processed sequentially, allowing precise control based on component dependencies and requirements. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`provision`
+ +[]string + + +
+ +(Optional) + +

+Specifies the order for creating and initializing components. +This is designed for components that depend on one another. Components without dependencies can be grouped together. +

+ +

+Components that can be provisioned independently or have no dependencies can be listed together in the same stage, +separated by commas. +

+ +
+ +`terminate`
+ +[]string + + +
+ +(Optional) + +

+Outlines the order for stopping and deleting components. +This sequence is designed for components that require a graceful shutdown or have interdependencies. +

+ +

+Components that can be terminated independently or have no dependencies can be listed together in the same stage, +separated by commas. +

+ +
+ +`update`
+ +[]string + + +
+ +(Optional) + +

+Update determines the order for updating components’ specifications, such as image upgrades or resource scaling. +This sequence is designed for components that have dependencies or require specific update procedures. +

+ +

+Components that can be updated independently or have no dependencies can be listed together in the same stage, +separated by commas. +

+ +
+

+CmdExecutorConfig + +

+ +

+ +(Appears on:PostStartAction, SwitchoverAction, SystemAccountSpec) + +

+
+ +

+CmdExecutorConfig specifies how to perform creation and deletion statements. +

+ +

+Deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`CommandExecutorEnvItem`
+ + +CommandExecutorEnvItem + + + +
+ + +

+ +(Members of `CommandExecutorEnvItem` are embedded into this type.) + +

+ +
+ +`CommandExecutorItem`
+ + +CommandExecutorItem + + + +
+ + +

+ +(Members of `CommandExecutorItem` are embedded into this type.) + +

+ +
+

+CommandExecutorEnvItem + +

+ +

+ +(Appears on:CmdExecutorConfig) + +

+
+ +

+CommandExecutorEnvItem is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`image`
+ +string + + +
+ + +

+Specifies the image used to execute the command. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+A list of environment variables that will be injected into the command execution context. +

+ +
+

+CommandExecutorItem + +

+ +

+ +(Appears on:CmdExecutorConfig) + +

+
+ +

+CommandExecutorItem is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`command`
+ +[]string + + +
+ + +

+The command to be executed. +

+ +
+ +`args`
+ +[]string + + +
+ +(Optional) + +

+Additional parameters used in the execution of the command. +

+ +
+

+ComponentConfigSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition, ComponentDefinitionSpec, ConfigurationItemDetail) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentTemplateSpec`
+ + +ComponentTemplateSpec + + + +
+ + +

+ +(Members of `ComponentTemplateSpec` are embedded into this type.) + +

+ +
+ +`keys`
+ +[]string + + +
+ +(Optional) + +

+Specifies the configuration files within the ConfigMap that support dynamic updates. +

+ +

+A configuration template (provided in the form of a ConfigMap) may contain templates for multiple +configuration files. +Each configuration file corresponds to a key in the ConfigMap. +Some of these configuration files may support dynamic modification and reloading without requiring +a pod restart. +

+ +

+If empty or omitted, all configuration files in the ConfigMap are assumed to support dynamic updates, +and ConfigConstraint applies to all keys. +

+ +
+ +`legacyRenderedConfigSpec`
+ + +LegacyRenderedTemplateSpec + + + +
+ +(Optional) + +

+Specifies the secondary rendered config spec for pod-specific customization. +

+ +

+The template is rendered inside the pod (by the “config-manager” sidecar container) and merged with the main +template’s render result to generate the final configuration file. +

+ +

+This field is intended to handle scenarios where different pods within the same Component have +varying configurations. It allows for pod-specific customization of the configuration. +

+ +

+Note: This field will be deprecated in future versions, and the functionality will be moved to +`cluster.spec.componentSpecs[*].instances[*]`. +

+ +
+ +`constraintRef`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the referenced configuration constraints object. +

+ +
+ +`asEnvFrom`
+ +[]string + + +
+ +(Optional) + +

+Specifies the containers to inject the ConfigMap parameters as environment variables. +

+ +

+This is useful when application images accept parameters through environment variables and +generate the final configuration file in the startup script based on these variables. +

+ +

+This field allows users to specify a list of container names, and KubeBlocks will inject the environment +variables converted from the ConfigMap into these designated containers. This provides a flexible way to +pass the configuration items from the ConfigMap to the container without modifying the image. +

+ +

+Deprecated: `asEnvFrom` has been deprecated since 0.9.0 and will be removed in 0.10.0. +Use `injectEnvTo` instead. +

+ +
+ +`injectEnvTo`
+ +[]string + + +
+ +(Optional) + +

+Specifies the containers to inject the ConfigMap parameters as environment variables. +

+ +

+This is useful when application images accept parameters through environment variables and +generate the final configuration file in the startup script based on these variables. +

+ +

+This field allows users to specify a list of container names, and KubeBlocks will inject the environment +variables converted from the ConfigMap into these designated containers. This provides a flexible way to +pass the configuration items from the ConfigMap to the container without modifying the image. +

+ +
+ +`reRenderResourceTypes`
+ + +[]RerenderResourceType + + + +
+ +(Optional) + +

+Specifies whether the configuration needs to be re-rendered after v-scale or h-scale operations to reflect changes. +

+ +

+In some scenarios, the configuration may need to be updated to reflect the changes in resource allocation +or cluster topology. Examples: +

+
    +
  • +Redis: adjust maxmemory after v-scale operation. +
  • +
  • +MySQL: increase max connections after v-scale operation. +
  • +
  • +Zookeeper: update zoo.cfg with new node addresses after h-scale operation. +
  • +
+ +
+ +`asSecret`
+ +bool + + +
+ +(Optional) + +

+Whether to store the final rendered parameters as a secret. +

+ +
+

+ComponentDefRef + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+ComponentDefRef is used to select the component and its fields to be referenced. +

+ +

+Deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentDefName`
+ +string + + +
+ + +

+The name of the componentDef to be selected. +

+ +
+ +`failurePolicy`
+ + +FailurePolicyType + + + +
+ +(Optional) + +

+Defines the policy to be followed in case of a failure in finding the component. +

+ +
+ +`componentRefEnv`
+ + +[]ComponentRefEnv + + + +
+ +(Optional) + +

+The values that are to be injected as environment variables into each component. +

+ +
+

+ComponentDefinitionSpec + +

+ +

+ +(Appears on:ComponentDefinition) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`provider`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Component provider, typically the vendor or developer name. +It identifies the entity responsible for creating and maintaining the Component. +

+ +

+When specifying the provider name, consider the following guidelines: +

+
    +
  • +Keep the name concise and relevant to the Component. +
  • +
  • +Use a consistent naming convention across Components from the same provider. +
  • +
  • +Avoid using trademarked or copyrighted names without proper permission. +
  • +
+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Provides a brief and concise explanation of the Component’s purpose, functionality, and any relevant details. +It serves as a quick reference for users to understand the Component’s role and characteristics. +

+ +
+ +`serviceKind`
+ +string + + +
+ +(Optional) + +

+Defines the type of well-known service protocol that the Component provides. +It specifies the standard or widely recognized protocol used by the Component to offer its Services. +

+ +

+The `serviceKind` field allows users to quickly identify the type of Service provided by the Component +based on common protocols or service types. This information helps in understanding the compatibility, +interoperability, and usage of the Component within a system. +

+ +

+Some examples of well-known service protocols include: +

+
    +
  • +“MySQL”: Indicates that the Component provides a MySQL database service. +
  • +
  • +“PostgreSQL”: Indicates that the Component offers a PostgreSQL database service. +
  • +
  • +“Redis”: Signifies that the Component functions as a Redis key-value store. +
  • +
  • +“ETCD”: Denotes that the Component serves as an ETCD distributed key-value store. +
  • +
+ +

+The `serviceKind` value is case-insensitive, allowing for flexibility in specifying the protocol name. +

+ +

+When specifying the `serviceKind`, consider the following guidelines: +

+
    +
  • +Use well-established and widely recognized protocol names or service types. +
  • +
  • +Ensure that the `serviceKind` accurately represents the primary service type offered by the Component. +
  • +
  • +If the Component provides multiple services, choose the most prominent or commonly used protocol. +
  • +
  • +Limit the `serviceKind` to a maximum of 32 characters for conciseness and readability. +
  • +
+ +

+Note: The `serviceKind` field is optional and can be left empty if the Component does not fit into a well-known +service category or if the protocol is not widely recognized. It is primarily used to convey information about +the Component’s service type to users and facilitate discovery and integration. +

+ +

+The `serviceKind` field is immutable and cannot be updated. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+Specifies the version of the Service provided by the Component. +It follows the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +

+The Semantic Versioning specification defines a version number format of X.Y.Z (MAJOR.MINOR.PATCH), where: +

+
    +
  • +X represents the major version and indicates incompatible API changes. +
  • +
  • +Y represents the minor version and indicates added functionality in a backward-compatible manner. +
  • +
  • +Z represents the patch version and indicates backward-compatible bug fixes. +
  • +
+ +

+Additional labels for pre-release and build metadata are available as extensions to the X.Y.Z format: +

+
    +
  • +Use pre-release labels (e.g., -alpha, -beta) for versions that are not yet stable or ready for production use. +
  • +
  • +Use build metadata (e.g., +build.1) for additional version information if needed. +
  • +
+ +

+Examples of valid ServiceVersion values: +

+
    +
  • +“1.0.0” +
  • +
  • +“2.3.1” +
  • +
  • +“3.0.0-alpha.1” +
  • +
  • +“4.5.2+build.1” +
  • +
+ +

+The `serviceVersion` field is immutable and cannot be updated. +

+ +
+ +`runtime`
+ + +Kubernetes core/v1.PodSpec + + + +
+ + +

+Specifies the PodSpec template used in the Component. +It includes the following elements: +

+
    +
  • +Init containers +
  • +
  • +Containers +
      +
    • +Image +
    • +
    • +Commands +
    • +
    • +Args +
    • +
    • +Envs +
    • +
    • +Mounts +
    • +
    • +Ports +
    • +
    • +Security context +
    • +
    • +Probes +
    • +
    • +Lifecycle +
    • +
    +
  • +
  • +Volumes +
  • +
+ +

+This field is intended to define static settings that remain consistent across all instantiated Components. +Dynamic settings such as CPU and memory resource limits, as well as scheduling settings (affinity, +toleration, priority), may vary among different instantiated Components. +They should be specified in the `cluster.spec.componentSpecs` (ClusterComponentSpec). +

+ +

+Specific instances of a Component may override settings defined here, such as using a different container image +or modifying environment variable values. +These instance-specific overrides can be specified in `cluster.spec.componentSpecs[*].instances`. +

+ +

+This field is immutable and cannot be updated once set. +

+ +
+ +`monitor`
+ + +MonitorConfig + + + +
+ +(Optional) + +

+Deprecated since v0.9. +Specifies the monitoring configuration provided by the provider. +

+ +
+ +`exporter`
+ + +Exporter + + + +
+ +(Optional) + +

+Defines the built-in metrics exporter container. +

+ +
+ +`vars`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Defines variables which are determined after Cluster instantiation and reflect +dynamic or runtime attributes of instantiated Clusters. +These variables serve as placeholders for setting environment variables in Pods and Actions, +or for rendering configuration and script templates before actual values are finalized. +

+ +

+These variables are placed in front of the environment variables declared in the Pod if used as +environment variables. +

+ +

+Variable values can be sourced from: +

+
    +
  • +ConfigMap: Select and extract a value from a specific key within a ConfigMap. +
  • +
  • +Secret: Select and extract a value from a specific key within a Secret. +
  • +
  • +HostNetwork: Retrieves values (including ports) from host-network resources. +
  • +
  • +Service: Retrieves values (including address, port, NodePort) from a selected Service. +Intended to obtain the address of a ComponentService within the same Cluster. +
  • +
  • +Credential: Retrieves account name and password from a SystemAccount variable. +
  • +
  • +ServiceRef: Retrieves address, port, account name and password from a selected ServiceRefDeclaration. +Designed to obtain the address bound to a ServiceRef, such as a ClusterService or +ComponentService of another cluster or an external service. +
  • +
  • +Component: Retrieves values from a selected Component, including replicas and instance name list. +
  • +
+ +

+This field is immutable. +

+ +
+ +`volumes`
+ + +[]ComponentVolume + + + +
+ +(Optional) + +

+Defines the volumes used by the Component and some static attributes of the volumes. +After defining the volumes here, user can reference them in the +`cluster.spec.componentSpecs[*].volumeClaimTemplates` field to configure dynamic properties such as +volume capacity and storage class. +

+ +

+This field allows you to specify the following: +

+
    +
  • +Snapshot behavior: Determines whether a snapshot of the volume should be taken when performing +a snapshot backup of the Component. +
  • +
  • +Disk high watermark: Sets the high watermark for the volume’s disk usage. +When the disk usage reaches the specified threshold, it triggers an alert or action. +
  • +
+ +

+By configuring these volume behaviors, you can control how the volumes are managed and monitored within the Component. +

+ +

+This field is immutable. +

+ +
+ +`hostNetwork`
+ + +HostNetwork + + + +
+ +(Optional) + +

+Specifies the host network configuration for the Component. +

+ +

+When `hostNetwork` option is enabled, the Pods share the host’s network namespace and can directly access +the host’s network interfaces. +This means that if multiple Pods need to use the same port, they cannot run on the same host simultaneously +due to port conflicts. +

+ +

+The DNSPolicy field in the Pod spec determines how containers within the Pod perform DNS resolution. +When using hostNetwork, the operator will set the DNSPolicy to ‘ClusterFirstWithHostNet’. +With this policy, DNS queries will first go through the K8s cluster’s DNS service. +If the query fails, it will fall back to the host’s DNS settings. +

+ +

+If set, the DNS policy will be automatically set to “ClusterFirstWithHostNet”. +

+ +

+This field is immutable. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Defines additional Services to expose the Component’s endpoints. +

+ +

+A default headless Service, named `{cluster.name}-{component.name}-headless`, is automatically created +for internal Cluster communication. +

+ +

+This field enables customization of additional Services to expose the Component’s endpoints to +other Components within the same or different Clusters, and to external applications. +Each Service entry in this list can include properties such as ports, type, and selectors. +

+
    +
  • +For intra-Cluster access, Components can reference Services using variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceVarRef`. +
  • +
  • +For inter-Cluster access, reference Services use variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceRefVarRef`, +and bind Services at Cluster creation time with `clusterComponentSpec.ServiceRef[*].clusterServiceSelector`. +
  • +
+ +

+This field is immutable. +

+ +
+ +`configs`
+ + +[]ComponentConfigSpec + + + +
+ +(Optional) + +

+Specifies the configuration file templates and volume mount parameters used by the Component. +It also includes descriptions of the parameters in the ConfigMaps, such as value range limitations. +

+ +

+This field specifies a list of templates that will be rendered into Component containers’ configuration files. +Each template is represented as a ConfigMap and may contain multiple configuration files, +with each file being a key in the ConfigMap. +

+ +

+The rendered configuration files will be mounted into the Component’s containers +according to the specified volume mount parameters. +

+ +

+This field is immutable. +

+ +
+ +`logConfigs`
+ + +[]LogConfig + + + +
+ +(Optional) + +

+Defines the types of logs generated by instances of the Component and their corresponding file paths. +These logs can be collected for further analysis and monitoring. +

+ +

+The `logConfigs` field is an optional list of LogConfig objects, where each object represents +a specific log type and its configuration. +It allows you to specify multiple log types and their respective file paths for the Component. +

+ +

+Examples: +

+
+
+ logConfigs:
+ - filePathPattern: /data/mysql/log/mysqld-error.log
+   name: error
+ - filePathPattern: /data/mysql/log/mysqld.log
+   name: general
+ - filePathPattern: /data/mysql/log/mysqld-slowquery.log
+   name: slow
+
+
+ +

+This field is immutable. +

+ +
+ +`scripts`
+ + +[]ComponentTemplateSpec + + + +
+ +(Optional) + +

+Specifies groups of scripts, each provided via a ConfigMap, to be mounted as volumes in the container. +These scripts can be executed during container startup or via specific actions. +

+ +

+Each script group is encapsulated in a ComponentTemplateSpec that includes: +

+
    +
  • +The ConfigMap containing the scripts. +
  • +
  • +The mount point where the scripts will be mounted inside the container. +
  • +
+ +

+This field is immutable. +

+ +
+ +`policyRules`
+ + +[]Kubernetes rbac/v1.PolicyRule + + + +
+ +(Optional) + +

+Defines the namespaced policy rules required by the Component. +

+ +

+The `policyRules` field is an array of `rbacv1.PolicyRule` objects that define the policy rules +needed by the Component to operate within a namespace. +These policy rules determine the permissions and verbs the Component is allowed to perform on +Kubernetes resources within the namespace. +

+ +

+The purpose of this field is to automatically generate the necessary RBAC roles +for the Component based on the specified policy rules. +This ensures that the Pods in the Component have appropriate permissions to function. +

+ +

+Note: This field is currently non-functional and is reserved for future implementation. +

+ +

+This field is immutable. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static labels that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If a label key in the `labels` field conflicts with any system labels or user-specified labels, +it will be silently ignored to avoid overriding higher-priority labels. +

+ +

+This field is immutable. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static annotations that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If an annotation key in the `annotations` field conflicts with any system annotations +or user-specified annotations, it will be silently ignored to avoid overriding higher-priority annotations. +

+ +

+This field is immutable. +

+ +
+ +`replicasLimit`
+ + +ReplicasLimit + + + +
+ +(Optional) + +

+Defines the upper limit of the number of replicas supported by the Component. +

+ +

+It defines the maximum number of replicas that can be created for the Component. +This field allows you to set a limit on the scalability of the Component, preventing it from exceeding a certain number of replicas. +

+ +

+This field is immutable. +

+ +
+ +`systemAccounts`
+ + +[]SystemAccount + + + +
+ +(Optional) + +

+An array of `SystemAccount` objects that define the system accounts needed +for the management operations of the Component. +

+ +

+Each `SystemAccount` includes: +

+
    +
  • +Account name. +
  • +
  • +The SQL statement template: Used to create the system account. +
  • +
  • +Password Source: Either generated based on certain rules or retrieved from a Secret. +
  • +
+ +

+Use cases for system accounts typically involve tasks like system initialization, backups, monitoring, +health checks, replication, and other system-level operations. +

+ +

+System accounts are distinct from user accounts, although both are database accounts. +

+
    +
  • +System Accounts: Created during Cluster setup by the KubeBlocks operator, +these accounts have higher privileges for system management and are fully managed +through a declarative API by the operator. +
  • +
  • +User Accounts: Managed by users or administrator. +User account permissions should follow the principle of least privilege, +granting only the necessary access rights to complete their required tasks. +
  • +
+ +

+This field is immutable. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the concurrency strategy for updating multiple instances of the Component. +Available strategies: +

+
    +
  • +`Serial`: Updates replicas one at a time, ensuring minimal downtime by waiting for each replica to become ready +before updating the next. +
  • +
  • +`Parallel`: Updates all replicas simultaneously, optimizing for speed but potentially reducing availability +during the update. +
  • +
  • +`BestEffortParallel`: Updates replicas concurrently with a limit on simultaneous updates to ensure a minimum +number of operational replicas for maintaining quorum. + For example, in a 5-replica component, updating a maximum of 2 replicas simultaneously keeps +at least 3 operational for quorum. +
  • +
+ +

+This field is immutable and defaults to ‘Serial’. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+InstanceSet controls the creation of pods during initial scale up, replacement of pods on nodes, and scaling down. +

+
    +
  • +`OrderedReady`: Creates pods in increasing order (pod-0, then pod-1, etc). The controller waits until each pod +is ready before continuing. Pods are removed in reverse order when scaling down. +
  • +
  • +`Parallel`: Creates pods in parallel to match the desired scale without waiting. All pods are deleted at once +when scaling down. +
  • +
+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+Enumerate all possible roles assigned to each replica of the Component, influencing its behavior. +

+ +

+A replica can have zero to multiple roles. +KubeBlocks operator determines the roles of each replica by invoking the `lifecycleActions.roleProbe` method. +This action returns a list of roles for each replica, and the returned roles must be predefined in the `roles` field. +

+ +

+The roles assigned to a replica can influence various aspects of the Component’s behavior, such as: +

+
    +
  • +Service selection: The Component’s exposed Services may target replicas based on their roles using `roleSelector`. +
  • +
  • +Update order: The roles can determine the order in which replicas are updated during a Component update. +For instance, replicas with a “follower” role can be updated first, while the replica with the “leader” +role is updated last. This helps minimize the number of leader changes during the update process. +
  • +
+ +

+This field is immutable. +

+ +
+ +`roleArbitrator`
+ + +RoleArbitrator + + + +
+ +(Optional) + +

+This field has been deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +

+This field is immutable. +

+ +
+ +`lifecycleActions`
+ + +ComponentLifecycleActions + + + +
+ +(Optional) + +

+Defines a set of hooks and procedures that customize the behavior of a Component throughout its lifecycle. +Actions are triggered at specific lifecycle stages: +

+
    +
  • +`postProvision`: Defines the hook to be executed after the creation of a Component, +with `preCondition` specifying when the action should be fired relative to the Component’s lifecycle stages: +`Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`. +
  • +
  • +`preTerminate`: Defines the hook to be executed before terminating a Component. +
  • +
  • +`roleProbe`: Defines the procedure which is invoked regularly to assess the role of replicas. +
  • +
  • +`switchover`: Defines the procedure for a controlled transition of leadership from the current leader to a new replica. +This approach aims to minimize downtime and maintain availability in systems with a leader-follower topology, +such as before planned maintenance or upgrades on the current leader node. +
  • +
  • +`memberJoin`: Defines the procedure to add a new replica to the replication group. +
  • +
  • +`memberLeave`: Defines the method to remove a replica from the replication group. +
  • +
  • +`readOnly`: Defines the procedure to switch a replica into the read-only state. +
  • +
  • +`readWrite`: transition a replica from the read-only state back to the read-write state. +
  • +
  • +`dataDump`: Defines the procedure to export the data from a replica. +
  • +
  • +`dataLoad`: Defines the procedure to import data into a replica. +
  • +
  • +`reconfigure`: Defines the procedure that updates a replica with a new configuration file. +
  • +
  • +`accountProvision`: Defines the procedure to generate a new database account. +
  • +
+ +

+This field is immutable. +

+ +
+ +`serviceRefDeclarations`
+ + +[]ServiceRefDeclaration + + + +
+ +(Optional) + +

+Lists external service dependencies of the Component, including services from other Clusters or outside the K8s environment. +

+ +

+This field is immutable. +

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+`minReadySeconds` is the minimum duration in seconds that a new Pod should remain in the ready +state without any of its containers crashing to be considered available. +This ensures the Pod’s stability and readiness to serve requests. +

+ +

+A default value of 0 seconds means the Pod is considered available as soon as it enters the ready state. +

+ +
+

+ComponentDefinitionStatus + +

+ +

+ +(Appears on:ComponentDefinition) + +

+
+ +

+ComponentDefinitionStatus defines the observed state of ComponentDefinition. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Refers to the most recent generation that has been observed for the ComponentDefinition. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Represents the current status of the ComponentDefinition. Valid values include `` (empty), `Available`, and `Unavailable`. +When the status is `Available`, the ComponentDefinition is ready and can be utilized by related objects. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+

+ComponentLifecycleActions + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ComponentLifecycleActions defines a collection of Actions for customizing the behavior of a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`postProvision`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Specifies the hook to be executed after a component’s creation. +

+ +

+By setting `postProvision.customHandler.preCondition`, you can determine the specific lifecycle stage +at which the action should trigger: `Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`, +with `ComponentReady` being the default. +

+ +

+The PostProvision Action is intended to run only once. +

+ +

+The container executing this action has access to following environment variables: +

+
    +
  • + +

    +KB_CLUSTER_POD_IP_LIST: Comma-separated list of the cluster’s pod IP addresses (e.g., “podIp1,podIp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_POD_NAME_LIST: Comma-separated list of the cluster’s pod names (e.g., “pod1,pod2”). +

    +
  • +
  • + +

    +KB_CLUSTER_POD_HOST_NAME_LIST: Comma-separated list of host names, each corresponding to a pod in +KB_CLUSTER_POD_NAME_LIST (e.g., “hostName1,hostName2”). +

    +
  • +
  • + +

    +KB_CLUSTER_POD_HOST_IP_LIST: Comma-separated list of host IP addresses, each corresponding to a pod in +KB_CLUSTER_POD_NAME_LIST (e.g., “hostIp1,hostIp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_POD_NAME_LIST: Comma-separated list of all pod names within the component +(e.g., “pod1,pod2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_POD_IP_LIST: Comma-separated list of pod IP addresses, +matching the order of pods in KB_CLUSTER_COMPONENT_POD_NAME_LIST (e.g., “podIp1,podIp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_POD_HOST_NAME_LIST: Comma-separated list of host names for each pod, +matching the order of pods in KB_CLUSTER_COMPONENT_POD_NAME_LIST (e.g., “hostName1,hostName2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_POD_HOST_IP_LIST: Comma-separated list of host IP addresses for each pod, +matching the order of pods in KB_CLUSTER_COMPONENT_POD_NAME_LIST (e.g., “hostIp1,hostIp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_LIST: Comma-separated list of all cluster components (e.g., “comp1,comp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_DELETING_LIST: Comma-separated list of components that are currently being deleted +(e.g., “comp1,comp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_UNDELETED_LIST: Comma-separated list of components that are not being deleted +(e.g., “comp1,comp2”). +

    +
  • +
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`preTerminate`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Specifies the hook to be executed prior to terminating a component. +

+ +

+The PreTerminate Action is intended to run only once. +

+ +

+This action is executed immediately when a scale-down operation for the Component is initiated. +The actual termination and cleanup of the Component and its associated resources will not proceed +until the PreTerminate action has completed successfully. +

+ +

+The container executing this action has access to following environment variables: +

+
    +
  • + +

    +KB_CLUSTER_POD_IP_LIST: Comma-separated list of the cluster’s pod IP addresses (e.g., “podIp1,podIp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_POD_NAME_LIST: Comma-separated list of the cluster’s pod names (e.g., “pod1,pod2”). +

    +
  • +
  • + +

    +KB_CLUSTER_POD_HOST_NAME_LIST: Comma-separated list of host names, each corresponding to a pod in +KB_CLUSTER_POD_NAME_LIST (e.g., “hostName1,hostName2”). +

    +
  • +
  • + +

    +KB_CLUSTER_POD_HOST_IP_LIST: Comma-separated list of host IP addresses, each corresponding to a pod in +KB_CLUSTER_POD_NAME_LIST (e.g., “hostIp1,hostIp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_POD_NAME_LIST: Comma-separated list of all pod names within the component +(e.g., “pod1,pod2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_POD_IP_LIST: Comma-separated list of pod IP addresses, +matching the order of pods in KB_CLUSTER_COMPONENT_POD_NAME_LIST (e.g., “podIp1,podIp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_POD_HOST_NAME_LIST: Comma-separated list of host names for each pod, +matching the order of pods in KB_CLUSTER_COMPONENT_POD_NAME_LIST (e.g., “hostName1,hostName2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_POD_HOST_IP_LIST: Comma-separated list of host IP addresses for each pod, +matching the order of pods in KB_CLUSTER_COMPONENT_POD_NAME_LIST (e.g., “hostIp1,hostIp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_LIST: Comma-separated list of all cluster components (e.g., “comp1,comp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_DELETING_LIST: Comma-separated list of components that are currently being deleted +(e.g., “comp1,comp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_UNDELETED_LIST: Comma-separated list of components that are not being deleted +(e.g., “comp1,comp2”). +

    +
  • +
  • + +

    +KB_CLUSTER_COMPONENT_IS_SCALING_IN: Indicates whether the component is currently scaling in. +If this variable is present and set to “true”, it denotes that the component is undergoing a scale-in operation. +During scale-in, data rebalancing is necessary to maintain cluster integrity. +Contrast this with a cluster deletion scenario where data rebalancing is not required as the entire cluster +is being cleaned up. +

    +
  • +
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`roleProbe`
+ + +RoleProbe + + + +
+ +(Optional) + +

+Defines the procedure which is invoked regularly to assess the role of replicas. +

+ +

+This action is periodically triggered by Lorry at the specified interval to determine the role of each replica. +Upon successful execution, the action’s output designates the role of the replica, +which should match one of the predefined role names within `componentDefinition.spec.roles`. +The output is then compared with the previous successful execution result. +If a role change is detected, an event is generated to inform the controller, +which initiates an update of the replica’s role. +

+ +

+Defining a RoleProbe Action for a Component is required if roles are defined for the Component. +It ensures replicas are correctly labeled with their respective roles. +Without this, services that rely on roleSelectors might improperly direct traffic to wrong replicas. +

+ +

+The container executing this action has access to following environment variables: +

+
    +
  • +KB_POD_FQDN: The FQDN of the Pod whose role is being assessed. +
  • +
  • +KB_SERVICE_PORT: The port used by the database service. +
  • +
  • +KB_SERVICE_USER: The username with the necessary permissions to interact with the database service. +
  • +
  • +KB_SERVICE_PASSWORD: The corresponding password for KB_SERVICE_USER to authenticate with the database service. +
  • +
+ +

+Expected output of this action: +- On Success: The determined role of the replica, which must align with one of the roles specified + in the component definition. +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`switchover`
+ + +ComponentSwitchover + + + +
+ +(Optional) + +

+Defines the procedure for a controlled transition of leadership from the current leader to a new replica. +This approach aims to minimize downtime and maintain availability in systems with a leader-follower topology, +during events such as planned maintenance or when performing stop, shutdown, restart, or upgrade operations +involving the current leader node. +

+ +

+The container executing this action has access to following environment variables: +

+
    +
  • +KB_SWITCHOVER_CANDIDATE_NAME: The name of the pod for the new leader candidate, which may not be specified (empty). +
  • +
  • +KB_SWITCHOVER_CANDIDATE_FQDN: The FQDN of the new leader candidate’s pod, which may not be specified (empty). +
  • +
  • +KB_LEADER_POD_IP: The IP address of the current leader’s pod prior to the switchover. +
  • +
  • +KB_LEADER_POD_NAME: The name of the current leader’s pod prior to the switchover. +
  • +
  • +KB_LEADER_POD_FQDN: The FQDN of the current leader’s pod prior to the switchover. +
  • +
+ +

+The environment variables with the following prefixes are deprecated and will be removed in future releases: +

+
    +
  • +KB_REPLICATION_PRIMARYPOD +
  • +
  • +KB_CONSENSUS_LEADERPOD +
  • +
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`memberJoin`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure to add a new replica to the replication group. +

+ +

+This action is initiated after a replica pod becomes ready. +

+ +

+The role of the replica (e.g., primary, secondary) will be determined and assigned as part of the action command +implementation, or automatically by the database kernel or a sidecar utility like Patroni that implements +a consensus algorithm. +

+ +

+The container executing this action has access to following environment variables: +

+
    +
  • +KB_SERVICE_PORT: The port used by the database service. +
  • +
  • +KB_SERVICE_USER: The username with the necessary permissions to interact with the database service. +
  • +
  • +KB_SERVICE_PASSWORD: The corresponding password for KB_SERVICE_USER to authenticate with the database service. +
  • +
  • +KB_PRIMARY_POD_FQDN: The FQDN of the primary Pod within the replication group. +
  • +
  • +KB_MEMBER_ADDRESSES: A comma-separated list of Pod addresses for all replicas in the group. +
  • +
  • +KB_NEW_MEMBER_POD_NAME: The pod name of the replica being added to the group. +
  • +
  • +KB_NEW_MEMBER_POD_IP: The IP address of the replica being added to the group. +
  • +
+ +

+Expected action output: +- On Failure: An error message detailing the reason for any failure encountered +during the addition of the new member. +

+ +

+For example, to add a new OBServer to an OceanBase Cluster in ‘zone1’, the following command may be used: +

+
+
+command:
+- bash
+- -c
+- |
+   ADDRESS=${KB_MEMBER_ADDRESSES%%,*}
+   HOST=$(echo $ADDRESS | cut -d ':' -f 1)
+   PORT=$(echo $ADDRESS | cut -d ':' -f 2)
+   CLIENT="mysql -u $KB_SERVICE_USER -p$KB_SERVICE_PASSWORD -P $PORT -h $HOST -e"
+       $CLIENT "ALTER SYSTEM ADD SERVER '$KB_NEW_MEMBER_POD_IP:$KB_SERVICE_PORT' ZONE 'zone1'"
+
+
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`memberLeave`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure to remove a replica from the replication group. +

+ +

+This action is initiated before removing a replica from the group. +The operator will wait for MemberLeave to complete successfully before releasing the replica and cleaning up +related Kubernetes resources. +

+ +

+The process typically includes updating configurations and informing other group members about the removal. +Data migration is generally not part of this action and should be handled separately if needed. +

+ +

+The container executing this action has access to following environment variables: +

+
    +
  • +KB_SERVICE_PORT: The port used by the database service. +
  • +
  • +KB_SERVICE_USER: The username with the necessary permissions to interact with the database service. +
  • +
  • +KB_SERVICE_PASSWORD: The corresponding password for KB_SERVICE_USER to authenticate with the database service. +
  • +
  • +KB_PRIMARY_POD_FQDN: The FQDN of the primary Pod within the replication group. +
  • +
  • +KB_MEMBER_ADDRESSES: A comma-separated list of Pod addresses for all replicas in the group. +
  • +
  • +KB_LEAVE_MEMBER_POD_NAME: The pod name of the replica being removed from the group. +
  • +
  • +KB_LEAVE_MEMBER_POD_IP: The IP address of the replica being removed from the group. +
  • +
+ +

+Expected action output: +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+For example, to remove an OBServer from an OceanBase Cluster in ‘zone1’, the following command can be executed: +

+
+
+command:
+- bash
+- -c
+- |
+   ADDRESS=${KB_MEMBER_ADDRESSES%%,*}
+   HOST=$(echo $ADDRESS | cut -d ':' -f 1)
+   PORT=$(echo $ADDRESS | cut -d ':' -f 2)
+   CLIENT="mysql -u $KB_SERVICE_USER  -p$KB_SERVICE_PASSWORD -P $PORT -h $HOST -e"
+       $CLIENT "ALTER SYSTEM DELETE SERVER '$KB_LEAVE_MEMBER_POD_IP:$KB_SERVICE_PORT' ZONE 'zone1'"
+
+
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`readonly`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure to switch a replica into the read-only state. +

+ +

+Use Case: +This action is invoked when the database’s volume capacity nears its upper limit and space is about to be exhausted. +

+ +

+The container executing this action has access to following environment variables: +

+
    +
  • +KB_POD_FQDN: The FQDN of the replica pod whose role is being checked. +
  • +
  • +KB_SERVICE_PORT: The port used by the database service. +
  • +
  • +KB_SERVICE_USER: The username with the necessary permissions to interact with the database service. +
  • +
  • +KB_SERVICE_PASSWORD: The corresponding password for KB_SERVICE_USER to authenticate with the database service. +
  • +
+ +

+Expected action output: +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`readwrite`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure to transition a replica from the read-only state back to the read-write state. +

+ +

+Use Case: +This action is used to bring back a replica that was previously in a read-only state, +which restricted write operations, to its normal operational state where it can handle +both read and write operations. +

+ +

+The container executing this action has access to following environment variables: +

+
    +
  • +KB_POD_FQDN: The FQDN of the replica pod whose role is being checked. +
  • +
  • +KB_SERVICE_PORT: The port used by the database service. +
  • +
  • +KB_SERVICE_USER: The username with the necessary permissions to interact with the database service. +
  • +
  • +KB_SERVICE_PASSWORD: The corresponding password for KB_SERVICE_USER to authenticate with the database service. +
  • +
+ +

+Expected action output: +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`dataDump`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure for exporting the data from a replica. +

+ +

+Use Case: +This action is intended for initializing a newly created replica with data. It involves exporting data +from an existing replica and importing it into the new, empty replica. This is essential for synchronizing +the state of replicas across the system. +

+ +

+Applicability: +Some database engines or associated sidecar applications (e.g., Patroni) may already provide this functionality. +In such cases, this action may not be required. +

+ +

+The output should be a valid data dump streamed to stdout. It must exclude any irrelevant information to ensure +that only the necessary data is exported for import into the new replica. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`dataLoad`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure for importing data into a replica. +

+ +

+Use Case: +This action is intended for initializing a newly created replica with data. It involves exporting data +from an existing replica and importing it into the new, empty replica. This is essential for synchronizing +the state of replicas across the system. +

+ +

+Some database engines or associated sidecar applications (e.g., Patroni) may already provide this functionality. +In such cases, this action may not be required. +

+ +

+Data should be received through stdin. If any error occurs during the process, +the action must be able to guarantee idempotence to allow for retries from the beginning. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`reconfigure`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure that updates a replica with a new configuration. +

+ +

+Note: This field is immutable once it has been set. +

+ +

+This Action is reserved for future versions. +

+ +
+ +`accountProvision`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure to generate a new database account. +

+ +

+Use Case: +This action is designed to create system accounts that are utilized for replication, monitoring, backup, +and other administrative tasks. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+

+ComponentMessageMap +(`map[string]string` alias) +

+ +

+ +(Appears on:ClusterComponentStatus, ComponentStatus) + +

+
+
+

+ComponentRefEnv + +

+ +

+ +(Appears on:ComponentDefRef) + +

+
+ +

+ComponentRefEnv specifies name and value of an env. +

+ +

+Deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the env, it must be a C identifier. +

+ +
+ +`value`
+ +string + + +
+ +(Optional) + +

+The value of the env. +

+ +
+ +`valueFrom`
+ + +ComponentValueFrom + + + +
+ +(Optional) + +

+The source from which the value of the env is derived. +

+ +
+

+ComponentService + +

+ +

+ +(Appears on:ComponentDefinitionSpec, ComponentSpec) + +

+
+ +

+ComponentService defines a service that would be exposed as an inter-component service within a Cluster. +A Service defined in the ComponentService is expected to be accessed by other Components within the same Cluster. +

+ +

+When a Component needs to use a ComponentService provided by another Component within the same Cluster, +it can declare a variable in the `componentDefinition.spec.vars` section and bind it to the specific exposed address +of the ComponentService using the `serviceVarRef` field. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`Service`
+ + +Service + + + +
+ + +

+ +(Members of `Service` are embedded into this type.) + +

+ +
+ +`podService`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to create a corresponding Service for each Pod of the selected Component. +When set to true, a set of Services will be automatically generated for each Pod, +and the `roleSelector` field will be ignored. +

+ +

+The names of the generated Services will follow the same suffix naming pattern: `$(serviceName)-$(podOrdinal)`. +The total number of generated Services will be equal to the number of replicas specified for the Component. +

+ +

+Example usage: +

+
+
+name: my-service
+serviceName: my-service
+podService: true
+disableAutoProvision: true
+spec:
+  type: NodePort
+  ports:
+  - name: http
+    port: 80
+    targetPort: 8080
+
+
+ +

+In this example, if the Component has 3 replicas, three Services will be generated: +- my-service-0: Points to the first Pod (podOrdinal: 0) +- my-service-1: Points to the second Pod (podOrdinal: 1) +- my-service-2: Points to the third Pod (podOrdinal: 2) +

+ +

+Each generated Service will have the specified spec configuration and will target its respective Pod. +

+ +

+This feature is useful when you need to expose each Pod of a Component individually, allowing external access +to specific instances of the Component. +

+ +
+ +`disableAutoProvision`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the automatic provisioning of the service should be disabled. +

+ +

+If set to true, the service will not be automatically created at the component provisioning. +Instead, you can enable the creation of this service by specifying it explicitly in the cluster API. +

+ +
+

+ComponentSpec + +

+ +

+ +(Appears on:Component) + +

+
+ +

+ComponentSpec defines the desired state of Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compDef`
+ +string + + +
+ + +

+Specifies the name of the referenced ComponentDefinition. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +
+ +`serviceRefs`
+ + +[]ServiceRef + + + +
+ +(Optional) + +

+Defines a list of ServiceRef for a Component, enabling access to both external services and +Services provided by other Clusters. +

+ +

+Types of services: +

+
    +
  • +External services: Not managed by KubeBlocks or managed by a different KubeBlocks operator; +Require a ServiceDescriptor for connection details. +
  • +
  • +Services provided by a Cluster: Managed by the same KubeBlocks operator; +identified using Cluster, Component and Service names. +
  • +
+ +

+ServiceRefs with identical `serviceRef.name` in the same Cluster are considered the same. +

+ +

+Example: +

+
+
+serviceRefs:
+  - name: "redis-sentinel"
+    serviceDescriptor:
+      name: "external-redis-sentinel"
+  - name: "postgres-cluster"
+    clusterServiceSelector:
+      cluster: "my-postgres-cluster"
+      service:
+        component: "postgresql"
+
+
+ +

+The example above includes ServiceRefs to an external Redis Sentinel service and a PostgreSQL Cluster. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to add. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resources required by the Component. +It allows defining the CPU, memory requirements and limits for the Component’s containers. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for the Component. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for the Component. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+List of volumes to override. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Overrides Services defined in referenced ComponentDefinition and exposes endpoints that can be accessed +by clients. +

+ +
+ +`systemAccounts`
+ + +[]ComponentSystemAccount + + + +
+ +(Optional) + +

+Overrides system accounts defined in referenced ComponentDefinition. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the desired number of replicas in the Component for enhancing availability and durability, or load balancing. +

+ +
+ +`configs`
+ + +[]ClusterComponentConfig + + + +
+ +(Optional) + +

+Specifies the configuration content of a config template. +

+ +
+ +`enabledLogs`
+ +[]string + + +
+ +(Optional) + +

+Specifies which types of logs should be collected for the Cluster. +The log types are defined in the `componentDefinition.spec.logConfigs` field with the LogConfig entries. +

+ +

+The elements in the `enabledLogs` array correspond to the names of the LogConfig entries. +For example, if the `componentDefinition.spec.logConfigs` defines LogConfig entries with +names “slow_query_log” and “error_log”, +you can enable the collection of these logs by including their names in the `enabledLogs` array: +

+
+
+enabledLogs:
+- slow_query_log
+- error_log
+
+
+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceAccount required by the running Component. +This ServiceAccount is used to grant necessary permissions for the Component’s Pods to interact +with other Kubernetes resources, such as modifying Pod labels or sending events. +

+ +

+Defaults: +If not specified, KubeBlocks automatically assigns a default ServiceAccount named “kb-{cluster.name}”, +bound to a default role defined during KubeBlocks installation. +

+ +

+Future Changes: +Future versions might change the default ServiceAccount creation strategy to one per Component, +potentially revising the naming to “kb-{cluster.name}-{component.name}”. +

+ +

+Users can override the automatic ServiceAccount assignment by explicitly setting the name of +an existed ServiceAccount in this field. +

+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Indicates the InstanceUpdateStrategy that will be +employed to update Pods in the InstanceSet when a revision is made to +Template. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default Concurrency is 100%. +

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +`StrictInPlace` indicates that only allows in-place upgrades. +Any attempt to modify other fields will be rejected. +
  • +
  • +`PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod. +If that fails, it will fall back to the ReCreate, where pod will be recreated. +Default value is “PreferInPlace” +
  • +
+ +
+ +`affinity`
+ + +Affinity + + + +
+ +(Optional) + +

+Specifies a group of affinity scheduling rules for the Component. +It allows users to control how the Component’s Pods are scheduled onto nodes in the Cluster. +

+ +

+Deprecated since v0.10, replaced by the `schedulingPolicy` field. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Allows Pods to be scheduled onto nodes with matching taints. +Each toleration in the array allows the Pod to tolerate node taints based on +specified `key`, `value`, `effect`, and `operator`. +

+
    +
  • +The `key`, `value`, and `effect` identify the taint that the toleration matches. +
  • +
  • +The `operator` determines how the toleration matches the taint. +
  • +
+ +

+Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. +

+ +

+Deprecated since v0.10, replaced by the `schedulingPolicy` field. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`tlsConfig`
+ + +TLSConfig + + + +
+ +(Optional) + +

+Specifies the TLS configuration for the Component, including: +

+
    +
  • +A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) for secure communication. +
  • +
  • +An optional field that specifies the configuration for the TLS certificates issuer when TLS is enabled. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +
  • +
+ +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Allows for the customization of configuration values for each instance within a Component. +An Instance represents a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps). +While instances typically share a common configuration as defined in the ClusterComponentSpec, +they can require unique settings in various scenarios: +

+ +

+For example: +- A database Component might require different resource allocations for primary and secondary instances, + with primaries needing more resources. +- During a rolling upgrade, a Component may first update the image for one or a few instances, +and then update the remaining instances after verifying that the updated instances are functioning correctly. +

+ +

+InstanceTemplate allows for specifying these unique configurations per instance. +Each instance’s name is constructed using the pattern: $(component.name)-$(template.name)-$(ordinal), +starting with an ordinal of 0. +It is crucial to maintain unique names for each InstanceTemplate to avoid conflicts. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the Component. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated Pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the Cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Defines runtimeClassName for all Pods managed by this Component. +

+ +
+ +`disableExporter`
+ +bool + + +
+ +(Optional) + +

+Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will not be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`stop`
+ +bool + + +
+ +(Optional) + +

+Stop the Component. +If set, all the computing resources will be released. +

+ +
+

+ComponentStatus + +

+ +

+ +(Appears on:Component) + +

+
+ +

+ComponentStatus represents the observed state of a Component within the Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Specifies the most recent generation observed for this Component object. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents a list of detailed status of the Component object. +Each condition in the list provides real-time information about certain aspect of the Component object. +

+ +

+This field is crucial for administrators and developers to monitor and respond to changes within the Component. +It provides a history of state transitions and a snapshot of the current state that can be used for +automated logic or direct inspection. +

+ +
+ +`phase`
+ + +ClusterComponentPhase + + + +
+ + +

+Indicates the current phase of the Component, with each phase indicating specific conditions: +

+
    +
  • +Creating: The initial phase for new Components, transitioning from ‘empty’(“”). +
  • +
  • +Running: All Pods in a Running state. +
  • +
  • +Updating: The Component is currently being updated, with no failed Pods present. +
  • +
  • +Abnormal: Some Pods have failed, indicating a potentially unstable state. +However, the cluster remains available as long as a quorum of members is functioning. +
  • +
  • +Failed: A significant number of Pods or critical Pods have failed. +The cluster may be non-functional or may offer only limited services (e.g., read-only). +
  • +
  • +Stopping: All Pods are being terminated, with current replica count at zero. +
  • +
  • +Stopped: All associated Pods have been successfully deleted. +
  • +
  • +Deleting: The Component is being deleted. +
  • +
+ +
+ +`message`
+ + +ComponentMessageMap + + + +
+ +(Optional) + +

+A map that stores detailed message about the Component. +Each entry in the map provides insights into specific elements of the Component, such as Pods or workloads. +

+ +

+Keys in this map are formatted as `ObjectKind/Name`, where `ObjectKind` could be a type like Pod, +and `Name` is the specific name of the object. +

+ +
+

+ComponentSwitchover + +

+ +

+ +(Appears on:ComponentLifecycleActions) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`withCandidate`
+ + +Action + + + +
+ +(Optional) + +

+Represents the switchover process for a specified candidate primary or leader instance. +Note that only Action.Exec is currently supported, while Action.HTTP is not. +

+ +
+ +`withoutCandidate`
+ + +Action + + + +
+ +(Optional) + +

+Represents a switchover process that does not involve a specific candidate primary or leader instance. +As with the previous field, only Action.Exec is currently supported, not Action.HTTP. +

+ +
+ +`scriptSpecSelectors`
+ + +[]ScriptSpecSelector + + + +
+ +(Optional) + +

+Used to define the selectors for the scriptSpecs that need to be referenced. +If this field is set, the scripts defined under the ‘scripts’ field can be invoked or referenced within an Action. +

+ +

+This field is deprecated from v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+

+ComponentSystemAccount + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the system account. +

+ +
+ +`passwordConfig`
+ + +PasswordConfig + + + +
+ +(Optional) + +

+Specifies the policy for generating the account’s password. +

+ +

+This field is immutable once set. +

+ +
+ +`secretRef`
+ + +ProvisionSecretRef + + + +
+ +(Optional) + +

+Refers to the secret from which data will be copied to create the new account. +

+ +

+This field is immutable once set. +

+ +
+

+ComponentTemplateSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition, ComponentConfigSpec, ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the configuration template. +

+ +
+ +`templateRef`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the referenced configuration template ConfigMap object. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the referenced configuration template ConfigMap object. +An empty namespace is equivalent to the “default” namespace. +

+ +
+ +`volumeName`
+ +string + + +
+ +(Optional) + +

+Refers to the volume name of PodTemplate. The configuration file produced through the configuration +template will be mounted to the corresponding volume. Must be a DNS_LABEL name. +The volume name must be defined in podSpec.containers[*].volumeMounts. +

+ +
+ +`defaultMode`
+ +int32 + + +
+ +(Optional) + +

+The operator attempts to set default file permissions for scripts (0555) and configurations (0444). +However, certain database engines may require different file permissions. +You can specify the desired file permissions here. +

+ +

+Must be specified as an octal value between 0000 and 0777 (inclusive), +or as a decimal value between 0 and 511 (inclusive). +YAML supports both octal and decimal values for file permissions. +

+ +

+Please note that this setting only affects the permissions of the files themselves. +Directories within the specified path are not impacted by this setting. +It’s important to be aware that this setting might conflict with other options +that influence the file mode, such as fsGroup. +In such cases, the resulting file mode may have additional bits set. +Refers to documents of k8s.ConfigMapVolumeSource.defaultMode for more information. +

+ +
+

+ComponentValueFrom + +

+ +

+ +(Appears on:ComponentRefEnv) + +

+
+ +

+ComponentValueFrom is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`type`
+ + +ComponentValueFromType + + + +
+ + +

+Specifies the source to select. It can be one of three types: `FieldRef`, `ServiceRef`, `HeadlessServiceRef`. +

+ +
+ +`fieldPath`
+ +string + + +
+ +(Optional) + +

+The jsonpath of the source to select when the Type is `FieldRef`. +Two objects are registered in the jsonpath: `componentDef` and `components`: +

+
    +
  • +`componentDef` is the component definition object specified in `componentRef.componentDefName`. +
  • +
  • +`components` are the component list objects referring to the component definition object. +
  • +
+ +
+ +`format`
+ +string + + +
+ +(Optional) + +

+Defines the format of each headless service address. +Three builtin variables can be used as placeholders: `$POD_ORDINAL`, `$POD_FQDN`, `$POD_NAME` +

+
    +
  • +`$POD_ORDINAL` represents the ordinal of the pod. +
  • +
  • +`$POD_FQDN` represents the fully qualified domain name of the pod. +
  • +
  • +`$POD_NAME` represents the name of the pod. +
  • +
+ +
+ +`joinWith`
+ +string + + +
+ +(Optional) + +

+The string used to join the values of headless service addresses. +

+ +
+

+ComponentValueFromType +(`string` alias) +

+ +

+ +(Appears on:ComponentValueFrom) + +

+
+ +

+ComponentValueFromType specifies the type of component value from which the data is derived. +

+ +

+Deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"FieldRef" +

+
+ +

+FromFieldRef refers to the value of a specific field in the object. +

+ +
+ +

+"HeadlessServiceRef" +

+
+ +

+FromHeadlessServiceRef refers to a headless service within the same namespace as the object. +

+ +
+ +

+"ServiceRef" +

+
+ +

+FromServiceRef refers to a service within the same namespace as the object. +

+ +
+

+ComponentVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ComponentVarSelector selects a var from a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Component to select from. +

+ +
+ +`ComponentVars`
+ + +ComponentVars + + + +
+ + +

+ +(Members of `ComponentVars` are embedded into this type.) + +

+ +
+

+ComponentVars + +

+ +

+ +(Appears on:ComponentVarSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the name of the Component object. +

+ +
+ +`replicas`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the replicas of the component. +

+ +
+ +`instanceNames`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the pod name list of the component. +The value will be presented in the following format: name1,name2,… +

+ +
+ +`podFQDNs`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the pod FQDN list of the component. +The value will be presented in the following format: FQDN1,FQDN2,… +

+ +
+

+ComponentVersionCompatibilityRule + +

+ +

+ +(Appears on:ComponentVersionSpec) + +

+
+ +

+ComponentVersionCompatibilityRule defines the compatibility between a set of component definitions and a set of releases. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compDefs`
+ +[]string + + +
+ + +

+CompDefs specifies names for the component definitions associated with this ComponentVersion. +Each name in the list can represent an exact name, a name prefix, or a regular expression pattern. +

+ +

+For example: +

+
    +
  • +“mysql-8.0.30-v1alpha1”: Matches the exact name “mysql-8.0.30-v1alpha1” +
  • +
  • +“mysql-8.0.30”: Matches all names starting with “mysql-8.0.30” +
  • +
  • +”^mysql-8.0.\d{1,2}$“: Matches all names starting with “mysql-8.0.” followed by one or two digits. +
  • +
+ +
+ +`releases`
+ +[]string + + +
+ + +

+Releases is a list of identifiers for the releases. +

+ +
+

+ComponentVersionRelease + +

+ +

+ +(Appears on:ComponentVersionSpec) + +

+
+ +

+ComponentVersionRelease represents a release of component instances within a ComponentVersion. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name is a unique identifier for this release. +Cannot be updated. +

+ +
+ +`changes`
+ +string + + +
+ +(Optional) + +

+Changes provides information about the changes made in this release. +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+ServiceVersion defines the version of the well-known service that the component provides. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +If the release is used, it will serve as the service version for component instances, overriding the one defined in the component definition. +Cannot be updated. +

+ +
+ +`images`
+ +map[string]string + + +
+ + +

+Images define the new images for different containers within the release. +

+ +
+

+ComponentVersionSpec + +

+ +

+ +(Appears on:ComponentVersion) + +

+
+ +

+ComponentVersionSpec defines the desired state of ComponentVersion +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compatibilityRules`
+ + +[]ComponentVersionCompatibilityRule + + + +
+ + +

+CompatibilityRules defines compatibility rules between sets of component definitions and releases. +

+ +
+ +`releases`
+ + +[]ComponentVersionRelease + + + +
+ + +

+Releases represents different releases of component instances within this ComponentVersion. +

+ +
+

+ComponentVersionStatus + +

+ +

+ +(Appears on:ComponentVersion) + +

+
+ +

+ComponentVersionStatus defines the observed state of ComponentVersion +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+ObservedGeneration is the most recent generation observed for this ComponentVersion. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Phase valid values are ``, `Available`, `Unavailable`. +Available indicates that the ComponentVersion has become available, and can be used for co-related objects. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Extra message for current phase. +

+ +
+ +`serviceVersions`
+ +string + + +
+ +(Optional) + +

+ServiceVersions represent the supported service versions of this ComponentVersion. +

+ +
+

+ComponentVolume + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the volume. +It must be a DNS_LABEL and unique within the pod. +More info can be found at: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +Note: This field cannot be updated. +

+ +
+ +`needSnapshot`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the creation of a snapshot of this volume is necessary when performing a backup of the Component. +

+ +

+Note: This field cannot be updated. +

+ +
+ +`highWatermark`
+ +int + + +
+ +(Optional) + +

+Sets the critical threshold for volume space utilization as a percentage (0-100). +

+ +

+Exceeding this percentage triggers the system to switch the volume to read-only mode as specified in +`componentDefinition.spec.lifecycleActions.readOnly`. +This precaution helps prevent space depletion while maintaining read-only access. +If the space utilization later falls below this threshold, the system reverts the volume to read-write mode +as defined in `componentDefinition.spec.lifecycleActions.readWrite`, restoring full functionality. +

+ +

+Note: This field cannot be updated. +

+ +
+

+ConfigConstraintSpec + +

+ +

+ +(Appears on:ConfigConstraint) + +

+
+ +

+ConfigConstraintSpec defines the desired state of ConfigConstraint +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`reloadOptions`
+ + +ReloadOptions + + + +
+ +(Optional) + +

+Specifies the dynamic reload action supported by the engine. +When set, the controller executes the method defined here to execute hot parameter updates. +

+ +

+Dynamic reloading is triggered only if both of the following conditions are met: +

+
    +
  1. +The modified parameters are listed in the `dynamicParameters` field. +If `reloadStaticParamsBeforeRestart` is set to true, modifications to `staticParameters` +can also trigger a reload. +
  2. +
  3. +`reloadOptions` is set. +
  4. +
+ +

+If `reloadOptions` is not set or the modified parameters are not listed in `dynamicParameters`, +dynamic reloading will not be triggered. +

+ +

+Example: +

+
+
+reloadOptions:
+ tplScriptTrigger:
+   namespace: kb-system
+   scriptConfigMapRef: mysql-reload-script
+   sync: true
+
+
+ +
+ +`dynamicActionCanBeMerged`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to consolidate dynamic reload and restart actions into a single restart. +

+
    +
  • +If true, updates requiring both actions will result in only a restart, merging the actions. +
  • +
  • +If false, updates will trigger both actions executed sequentially: first dynamic reload, then restart. +
  • +
+ +

+This flag allows for more efficient handling of configuration changes by potentially eliminating +an unnecessary reload step. +

+ +
+ +`reloadStaticParamsBeforeRestart`
+ +bool + + +
+ +(Optional) + +

+Configures whether the dynamic reload specified in `reloadOptions` applies only to dynamic parameters or +to all parameters (including static parameters). +

+
    +
  • +false (default): Only modifications to the dynamic parameters listed in `dynamicParameters` +will trigger a dynamic reload. +
  • +
  • +true: Modifications to both dynamic parameters listed in `dynamicParameters` and static parameters +listed in `staticParameters` will trigger a dynamic reload. +The “true” option is for certain engines that require static parameters to be set +via SQL statements before they can take effect on restart. +
  • +
+ +
+ +`toolsImageSpec`
+ + +ToolsSetup + + + +
+ +(Optional) + +

+Specifies the tools container image used by ShellTrigger for dynamic reload. +If the dynamic reload action is triggered by a ShellTrigger, this field is required. +This image must contain all necessary tools for executing the ShellTrigger scripts. +

+ +

+Usually the specified image is referenced by the init container, +which is then responsible for copying the tools from the image to a bin volume. +This ensures that the tools are available to the ‘config-manager’ sidecar. +

+ +
+ +`downwardAPIOptions`
+ + +[]DownwardAPIChangeTriggeredAction + + + +
+ +(Optional) + +

+Specifies a list of actions to execute specified commands based on Pod labels. +

+ +

+It utilizes the K8s Downward API to mount label information as a volume into the pod. +The ‘config-manager’ sidecar container watches for changes in the role label and dynamically invokes +registered commands (usually executing some SQL statements) when a change is detected. +

+ +

+It is designed for scenarios where: +

+
    +
  • +Replicas with different roles have different configurations, such as Redis primary & secondary replicas. +
  • +
  • +After a role switch (e.g., from secondary to primary), some changes in configuration are needed +to reflect the new role. +
  • +
+ +
+ +`scriptConfigs`
+ + +[]ScriptConfig + + + +
+ +(Optional) + +

+A list of ScriptConfig Object. +

+ +

+Each ScriptConfig object specifies a ConfigMap that contains script files that should be mounted inside the pod. +The scripts are mounted as volumes and can be referenced and executed by the dynamic reload +and DownwardAction to perform specific tasks or configurations. +

+ +
+ +`cfgSchemaTopLevelName`
+ +string + + +
+ +(Optional) + +

+Specifies the top-level key in the ‘configurationSchema.cue’ that organizes the validation rules for parameters. +This key must exist within the CUE script defined in ‘configurationSchema.cue’. +

+ +
+ +`configurationSchema`
+ + +CustomParametersValidation + + + +
+ +(Optional) + +

+Defines a list of parameters including their names, default values, descriptions, +types, and constraints (permissible values or the range of valid values). +

+ +
+ +`staticParameters`
+ +[]string + + +
+ +(Optional) + +

+List static parameters. +Modifications to any of these parameters require a restart of the process to take effect. +

+ +
+ +`dynamicParameters`
+ +[]string + + +
+ +(Optional) + +

+List dynamic parameters. +Modifications to these parameters trigger a configuration reload without requiring a process restart. +

+ +
+ +`immutableParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the parameters that cannot be modified once set. +Attempting to change any of these parameters will be ignored. +

+ +
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ +(Optional) + +

+Used to match labels on the pod to determine whether a dynamic reload should be performed. +

+ +

+In some scenarios, only specific pods (e.g., primary replicas) need to undergo a dynamic reload. +The `selector` allows you to specify label selectors to target the desired pods for the reload process. +

+ +

+If the `selector` is not specified or is nil, all pods managed by the workload will be considered for the dynamic +reload. +

+ +
+ +`formatterConfig`
+ + +FileFormatConfig + + + +
+ + +

+Specifies the format of the configuration file and any associated parameters that are specific to the chosen format. +Supported formats include `ini`, `xml`, `yaml`, `json`, `hcl`, `dotenv`, `properties`, and `toml`. +

+ +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +

+Example: +

+
+
+formatterConfig:
+ format: ini
+ iniConfig:
+   sectionName: mysqld
+
+
+ +
+

+ConfigConstraintStatus + +

+ +

+ +(Appears on:ConfigConstraint) + +

+
+ +

+ConfigConstraintStatus represents the observed state of a ConfigConstraint. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +ConfigConstraintPhase + + + +
+ +(Optional) + +

+Specifies the status of the configuration template. +When set to CCAvailablePhase, the ConfigConstraint can be referenced by ClusterDefinition. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides descriptions for abnormal states. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Refers to the most recent generation observed for this ConfigConstraint. This value is updated by the API Server. +

+ +
+

+ConfigMapRef + +

+ +

+ +(Appears on:UserResourceRefs) + +

+
+ +

+ConfigMapRef defines a reference to a ConfigMap. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ResourceMeta`
+ + +ResourceMeta + + + +
+ + +

+ +(Members of `ResourceMeta` are embedded into this type.) + +

+ +
+ +`configMap`
+ + +Kubernetes core/v1.ConfigMapVolumeSource + + + +
+ + +

+ConfigMap specifies the ConfigMap to be mounted as a volume. +

+ +
+

+ConfigParams + +

+ +

+ +(Appears on:ConfigurationItemDetail) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`content`
+ +string + + +
+ +(Optional) + +

+Holds the configuration keys and values. This field is a workaround for issues found in kubebuilder and code-generator. +Refer to https://github.com/kubernetes-sigs/kubebuilder/issues/528 and https://github.com/kubernetes/code-generator/issues/50 for more details. +

+ +

+Represents the content of the configuration file. +

+ +
+ +`parameters`
+ +map[string]*string + + +
+ +(Optional) + +

+Represents the updated parameters for a single configuration file. +

+ +
+

+ConfigTemplateExtension + +

+ +

+ +(Appears on:ConfigurationItemDetail, LegacyRenderedTemplateSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`templateRef`
+ +string + + +
+ + +

+Specifies the name of the referenced configuration template ConfigMap object. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the referenced configuration template ConfigMap object. +An empty namespace is equivalent to the “default” namespace. +

+ +
+ +`policy`
+ + +MergedPolicy + + + +
+ +(Optional) + +

+Defines the strategy for merging externally imported templates into component templates. +

+ +
+

+ConfigurationItemDetail + +

+ +

+ +(Appears on:ConfigurationSpec) + +

+
+ +

+ConfigurationItemDetail corresponds to settings of a configuration template (a ConfigMap). +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the unique identifier of the configuration template. +

+ +

+It must be a string of maximum 63 characters, and can only include lowercase alphanumeric characters, +hyphens, and periods. +The name must start and end with an alphanumeric character. +

+ +
+ +`version`
+ +string + + +
+ +(Optional) + +

+Deprecated: No longer used. Please use ‘Payload’ instead. Previously represented the version of the configuration template. +

+ +
+ +`payload`
+ + +Payload + + + +
+ +(Optional) + +

+External controllers can trigger a configuration rerender by modifying this field. +

+ +

+Note: Currently, the `payload` field is opaque and its content is not interpreted by the system. +Modifying this field will cause a rerender, regardless of the specific content of this field. +

+ +
+ +`configSpec`
+ + +ComponentConfigSpec + + + +
+ +(Optional) + +

+Specifies the name of the configuration template (a ConfigMap), ConfigConstraint, and other miscellaneous options. +

+ +

+The configuration template is a ConfigMap that contains multiple configuration files. +Each configuration file is stored as a key-value pair within the ConfigMap. +

+ +

+ConfigConstraint allows defining constraints and validation rules for configuration parameters. +It ensures that the configuration adheres to certain requirements and limitations. +

+ +
+ +`importTemplateRef`
+ + +ConfigTemplateExtension + + + +
+ +(Optional) + +

+Specifies the user-defined configuration template. +

+ +

+When provided, the `importTemplateRef` overrides the default configuration template +specified in `configSpec.templateRef`. +This allows users to customize the configuration template according to their specific requirements. +

+ +
+ +`configFileParams`
+ + +map[string]github.com/apecloud/kubeblocks/apis/apps/v1alpha1.ConfigParams + + + +
+ +(Optional) + +

+Specifies the user-defined configuration parameters. +

+ +

+When provided, the parameter values in `configFileParams` override the default configuration parameters. +This allows users to override the default configuration according to their specific needs. +

+ +
+

+ConfigurationItemDetailStatus + +

+ +

+ +(Appears on:ConfigurationStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the configuration template. It is a required field and must be a string of maximum 63 characters. +The name should only contain lowercase alphanumeric characters, hyphens, or periods. It should start and end with an alphanumeric character. +

+ +
+ +`phase`
+ + +ConfigurationPhase + + + +
+ +(Optional) + +

+Indicates the current status of the configuration item. +

+ +

+Possible values include “Creating”, “Init”, “Running”, “Pending”, “Merged”, “MergeFailed”, “FailedAndPause”, +“Upgrading”, “Deleting”, “FailedAndRetry”, “Finished”. +

+ +
+ +`lastDoneRevision`
+ +string + + +
+ +(Optional) + +

+Represents the last completed revision of the configuration item. This field is optional. +

+ +
+ +`updateRevision`
+ +string + + +
+ +(Optional) + +

+Represents the updated revision of the configuration item. This field is optional. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a description of any abnormal status. This field is optional. +

+ +
+ +`reconcileDetail`
+ + +ReconcileDetail + + + +
+ +(Optional) + +

+Provides detailed information about the execution of the configuration change. This field is optional. +

+ +
+

+ConfigurationPhase +(`string` alias) +

+ +

+ +(Appears on:ConfigurationItemDetailStatus) + +

+
+ +

+ConfigurationPhase defines the Configuration FSM phase +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Creating" +

+
+ +
+ +

+"Deleting" +

+
+ +
+ +

+"FailedAndPause" +

+
+ +
+ +

+"FailedAndRetry" +

+
+ +
+ +

+"Finished" +

+
+ +
+ +

+"Init" +

+
+ +
+ +

+"MergeFailed" +

+
+ +
+ +

+"Merged" +

+
+ +
+ +

+"Pending" +

+
+ +
+ +

+"Running" +

+
+ +
+ +

+"Upgrading" +

+
+ +
+

+ConfigurationSpec + +

+ +

+ +(Appears on:Configuration) + +

+
+ +

+ConfigurationSpec defines the desired state of a Configuration resource. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterRef`
+ +string + + +
+ + +

+Specifies the name of the Cluster that this configuration is associated with. +

+ +
+ +`componentName`
+ +string + + +
+ + +

+Represents the name of the Component that this configuration pertains to. +

+ +
+ +`configItemDetails`
+ + +[]ConfigurationItemDetail + + + +
+ +(Optional) + +

+ConfigItemDetails is an array of ConfigurationItemDetail objects. +

+ +

+Each ConfigurationItemDetail corresponds to a configuration template, +which is a ConfigMap that contains multiple configuration files. +Each configuration file is stored as a key-value pair within the ConfigMap. +

+ +

+The ConfigurationItemDetail includes information such as: +

+
    +
  • +The configuration template (a ConfigMap) +
  • +
  • +The corresponding ConfigConstraint (constraints and validation rules for the configuration) +
  • +
  • +Volume mounts (for mounting the configuration files) +
  • +
+ +
+

+ConfigurationStatus + +

+ +

+ +(Appears on:Configuration) + +

+
+ +

+ConfigurationStatus represents the observed state of a Configuration resource. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a description of any abnormal status. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the latest generation observed for this +Configuration. It corresponds to the Configuration’s generation, which is +updated by the API Server. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Provides detailed status information for the Configuration. +

+ +
+ +`configurationStatus`
+ + +[]ConfigurationItemDetailStatus + + + +
+ + +

+Provides the status of each component undergoing reconfiguration. +

+ +
+

+ConnectionCredentialAuth + +

+ +

+ +(Appears on:ServiceDescriptorSpec) + +

+
+ +

+ConnectionCredentialAuth specifies the authentication credentials required for accessing an external service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`username`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the username for the external service. +

+ +
+ +`password`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the password for the external service. +

+ +
+

+ConsensusMember + +

+ +

+ +(Appears on:ConsensusSetSpec) + +

+
+ +

+ConsensusMember is deprecated since v0.7. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the consensus member. +

+ +
+ +`accessMode`
+ + +AccessMode + + + +
+ + +

+Specifies the services that this member is capable of providing. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Indicates the number of Pods that perform this role. +The default is 1 for `Leader`, 0 for `Learner`, others for `Followers`. +

+ +
+

+ConsensusSetSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+ConsensusSetSpec is deprecated since v0.7. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`StatefulSetSpec`
+ + +StatefulSetSpec + + + +
+ + +

+ +(Members of `StatefulSetSpec` are embedded into this type.) + +

+ +
+ +`leader`
+ + +ConsensusMember + + + +
+ + +

+Represents a single leader in the consensus set. +

+ +
+ +`followers`
+ + +[]ConsensusMember + + + +
+ +(Optional) + +

+Members of the consensus set that have voting rights but are not the leader. +

+ +
+ +`learner`
+ + +ConsensusMember + + + +
+ +(Optional) + +

+Represents a member of the consensus set that does not have voting rights. +

+ +
+

+ContainerVars + +

+ +

+ +(Appears on:HostNetworkVars) + +

+
+ +

+ContainerVars defines the vars that can be referenced from a Container. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the container. +

+ +
+ +`port`
+ + +NamedVar + + + +
+ +(Optional) + +

+Container port to reference. +

+ +
+

+CredentialVar + +

+ +

+ +(Appears on:ConnectionCredentialAuth, ServiceDescriptorSpec) + +

+
+ +

+CredentialVar represents a variable that retrieves its value either directly from a specified expression +or from a source defined in `valueFrom`. +Only one of these options may be used at a time. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`value`
+ +string + + +
+ +(Optional) + +

+Holds a direct string or an expression that can be evaluated to a string. +

+ +

+It can include variables denoted by $(VAR_NAME). +These variables are expanded to the value of the environment variables defined in the container. +If a variable cannot be resolved, it remains unchanged in the output. +

+ +

+To escape variable expansion and retain the literal value, use double $ characters. +

+ +

+For example: +

+
    +
  • +”$(VAR_NAME)” will be expanded to the value of the environment variable VAR_NAME. +
  • +
  • +”$$(VAR_NAME)” will result in “$(VAR_NAME)” in the output, without any variable expansion. +
  • +
+ +

+Default value is an empty string. +

+ +
+ +`valueFrom`
+ + +Kubernetes core/v1.EnvVarSource + + + +
+ +(Optional) + +

+Specifies the source for the variable’s value. +

+ +
+

+CredentialVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+CredentialVarSelector selects a var from a Credential (SystemAccount). +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Credential (SystemAccount) to select from. +

+ +
+ +`CredentialVars`
+ + +CredentialVars + + + +
+ + +

+ +(Members of `CredentialVars` are embedded into this type.) + +

+ +
+

+CredentialVars + +

+ +

+ +(Appears on:CredentialVarSelector, ServiceRefVars) + +

+
+ +

+CredentialVars defines the vars that can be referenced from a Credential (SystemAccount). +Note: CredentialVars will only be used as environment variables for Pods & Actions, and will not be used to render the templates. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`username`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`password`
+ + +VarOption + + + +
+ +(Optional) + +
+

+CustomLabelSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+CustomLabelSpec is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`key`
+ +string + + +
+ + +

+The key of the label. +

+ +
+ +`value`
+ +string + + +
+ + +

+The value of the label. +

+ +
+ +`resources`
+ + +[]GVKResource + + + +
+ + +

+The resources that will be patched with the label. +

+ +
+

+CustomParametersValidation + +

+ +

+ +(Appears on:ConfigConstraintSpec) + +

+
+ +

+CustomParametersValidation defines a list of configuration items with their names, default values, descriptions, +types, and constraints. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cue`
+ +string + + +
+ +(Optional) + +

+Hold a string that contains a script written in CUE language that defines a list of configuration items. +Each item is detailed with its name, default value, description, type (e.g. string, integer, float), +and constraints (permissible values or the valid range of values). +

+ +

+CUE (Configure, Unify, Execute) is a declarative language designed for defining and validating +complex data configurations. +It is particularly useful in environments like K8s where complex configurations and validation rules are common. +

+ +

+This script functions as a validator for user-provided configurations, ensuring compliance with +the established specifications and constraints. +

+ +
+ +`schema`
+ + +Kubernetes api extensions v1.JSONSchemaProps + + + +
+ + +

+Generated from the ‘cue’ field and transformed into a JSON format. +

+ +
+

+EnvVar + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+EnvVar represents a variable present in the env of Pod/Action or the template of config/script. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name of the variable. Must be a C_IDENTIFIER. +

+ +
+ +`value`
+ +string + + +
+ +(Optional) + +

+Variable references `$(VAR_NAME)` are expanded using the previously defined variables in the current context. +

+ +

+If a variable cannot be resolved, the reference in the input string will be unchanged. +Double `$$` are reduced to a single `$`, which allows for escaping the `$(VAR_NAME)` syntax: i.e. +

+
    +
  • +`$$(VAR_NAME)` will produce the string literal `$(VAR_NAME)`. +
  • +
+ +

+Escaped references will never be expanded, regardless of whether the variable exists or not. +Defaults to “”. +

+ +
+ +`valueFrom`
+ + +VarSource + + + +
+ +(Optional) + +

+Source for the variable’s value. Cannot be used if value is not empty. +

+ +
+ +`expression`
+ +string + + +
+ +(Optional) + +

+A Go template expression that will be applied to the resolved value of the var. +

+ +

+The expression will only be evaluated if the var is successfully resolved to a non-credential value. +

+ +

+The resolved value can be accessed by its name within the expression; system vars and other user-defined +non-credential vars can be used within the expression in the same way. +Note that, when accessing a var by its name, you should replace all the “-” in the name with “_”, because +“-” is not a valid identifier in Go. +

+ +

+All expressions are evaluated in the order the vars are defined. If a var depends on any vars that also +have expressions defined, be careful about the evaluation order as it may use intermediate values. +

+ +

+The result of evaluation will be used as the final value of the var. If the expression fails to evaluate, +the resolving of var will also be considered failed. +

+ +
+

+ExecAction + +

+ +

+ +(Appears on:Action) + +

+
+ +

+ExecAction describes an Action that executes a command inside a container. +Which may run as a K8s job or be executed inside the Lorry sidecar container, depending on the implementation. +Future implementations will standardize execution within Lorry. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`command`
+ +[]string + + +
+ +(Optional) + +

+Specifies the command to be executed inside the container. +The working directory for this command is the container’s root directory(‘/’). +Commands are executed directly without a shell environment, meaning shell-specific syntax (‘|’, etc.) is not supported. +If the shell is required, it must be explicitly invoked in the command. +

+ +

+A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. +

+ +
+ +`args`
+ +[]string + + +
+ +(Optional) + +

+Args represents the arguments that are passed to the `command` for execution. +

+ +
+

+Exporter + +

+ +

+ +(Appears on:ClusterComponentDefinition, ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`containerName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the built-in metrics exporter container. +

+ +
+ +`scrapePath`
+ +string + + +
+ +(Optional) + +

+Specifies the http/https url path to scrape for metrics. +If empty, Prometheus uses the default value (e.g. `/metrics`). +

+ +
+ +`scrapePort`
+ +string + + +
+ +(Optional) + +

+Specifies the port name to scrape for metrics. +

+ +
+ +`scrapeScheme`
+ + +PrometheusScheme + + + +
+ +(Optional) + +

+Specifies the schema to use for scraping. +`http` and `https` are the expected values unless you rewrite the `__scheme__` label via relabeling. +If empty, Prometheus uses the default value `http`. +

+ +
+

+ExporterConfig + +

+ +

+ +(Appears on:MonitorConfig) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`scrapePort`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ + +

+scrapePort is the exporter port from which the Time Series Database scrapes metrics. +

+ +
+ +`scrapePath`
+ +string + + +
+ +(Optional) + +

+scrapePath is the exporter URL path from which the Time Series Database scrapes metrics. +

+ +
+

+FailurePolicyType +(`string` alias) +

+ +

+ +(Appears on:ComponentDefRef) + +

+
+ +

+FailurePolicyType specifies the type of failure policy. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Fail" +

+
+ +

+FailurePolicyFail means that an error will be reported. +

+ +
+ +

+"Ignore" +

+
+ +

+FailurePolicyIgnore means that an error will be ignored but logged. +

+ +
+

+GVKResource + +

+ +

+ +(Appears on:CustomLabelSpec) + +

+
+ +

+GVKResource is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`gvk`
+ +string + + +
+ + +

+Represents the GVK of a resource, such as “v1/Pod”, “apps/v1/StatefulSet”, etc. +When a resource matching this is found by the selector, a custom label will be added if it doesn’t already exist, +or updated if it does. +

+ +
+ +`selector`
+ +map[string]string + + +
+ +(Optional) + +

+A label query used to filter a set of resources. +

+ +
+

+HScaleDataClonePolicyType +(`string` alias) +

+ +

+ +(Appears on:HorizontalScalePolicy) + +

+
+ +

+HScaleDataClonePolicyType defines the data clone policy to be used during horizontal scaling. +This policy determines how data is handled when new nodes are added to the cluster. +The policy can be set to `None`, `CloneVolume`, or `Snapshot`. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"CloneVolume" +

+
+ +

+HScaleDataClonePolicyCloneVolume indicates that data will be cloned from existing volumes during horizontal scaling. +

+ +
+ +

+"Snapshot" +

+
+ +

+HScaleDataClonePolicyFromSnapshot indicates that data will be cloned from a snapshot during horizontal scaling. +

+ +
+ +

+"None" +

+
+ +

+HScaleDataClonePolicyNone indicates that no data cloning will occur during horizontal scaling. +

+ +
+

+HTTPAction + +

+ +

+ +(Appears on:Action) + +

+
+ +

+HTTPAction describes an Action that triggers HTTP requests. +HTTPAction is to be implemented in future version. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`path`
+ +string + + +
+ +(Optional) + +

+Specifies the endpoint to be requested on the HTTP server. +

+ +
+ +`port`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ + +

+Specifies the target port for the HTTP request. +It can be specified either as a numeric value in the range of 1 to 65535, +or as a named port that meets the IANA_SVC_NAME specification. +

+ +
+ +`host`
+ +string + + +
+ +(Optional) + +

+Indicates the server’s domain name or IP address. Defaults to the Pod’s IP. +Prefer setting the “Host” header in httpHeaders when needed. +

+ +
+ +`scheme`
+ + +Kubernetes core/v1.URIScheme + + + +
+ +(Optional) + +

+Designates the protocol used to make the request, such as HTTP or HTTPS. +If not specified, HTTP is used by default. +

+ +
+ +`method`
+ +string + + +
+ +(Optional) + +

+Represents the type of HTTP request to be made, such as “GET,” “POST,” “PUT,” etc. +If not specified, “GET” is the default method. +

+ +
+ +`httpHeaders`
+ + +[]Kubernetes core/v1.HTTPHeader + + + +
+ +(Optional) + +

+Allows for the inclusion of custom headers in the request. +HTTP permits the use of repeated headers. +

+ +
+

+HorizontalScalePolicy + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+HorizontalScalePolicy is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`type`
+ + +HScaleDataClonePolicyType + + + +
+ +(Optional) + +

+Determines the data synchronization method when a component scales out. +The policy can be one of the following: {None, CloneVolume}. The default policy is `None`. +

+
    +
  • +`None`: This is the default policy. It creates an empty volume without data cloning. +
  • +
  • +`CloneVolume`: This policy clones data to newly scaled pods. It first tries to use a volume snapshot. +If volume snapshot is not enabled, it will attempt to use a backup tool. If neither method works, it will report an error. +
  • +
  • +`Snapshot`: This policy is deprecated and is an alias for CloneVolume. +
  • +
+ +
+ +`backupPolicyTemplateName`
+ +string + + +
+ +(Optional) + +

+Refers to the backup policy template. +

+ +
+ +`volumeMountsName`
+ +string + + +
+ +(Optional) + +

+Specifies the volumeMount of the container to backup. +This only works if Type is not None. If not specified, the first volumeMount will be selected. +

+ +
+

+HostNetwork + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`containerPorts`
+ + +[]HostNetworkContainerPort + + + +
+ +(Optional) + +

+The list of container ports that are required by the component. +

+ +
+

+HostNetworkContainerPort + +

+ +

+ +(Appears on:HostNetwork) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`container`
+ +string + + +
+ + +

+Container specifies the target container within the Pod. +

+ +
+ +`ports`
+ +[]string + + +
+ + +

+Ports are named container ports within the specified container. +These container ports must be defined in the container for proper port allocation. +

+ +
+

+HostNetworkVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+HostNetworkVarSelector selects a var from host-network resources. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The component to select from. +

+ +
+ +`HostNetworkVars`
+ + +HostNetworkVars + + + +
+ + +

+ +(Members of `HostNetworkVars` are embedded into this type.) + +

+ +
+

+HostNetworkVars + +

+ +

+ +(Appears on:HostNetworkVarSelector) + +

+
+ +

+HostNetworkVars defines the vars that can be referenced from host-network resources. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`container`
+ + +ContainerVars + + + +
+ +(Optional) + +
+

+InstanceTemplate + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+ +

+InstanceTemplate allows customization of individual replica configurations in a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name specifies the unique name of the instance Pod created using this InstanceTemplate. +This name is constructed by concatenating the Cluster’s name, the Component’s name, the template’s name, and the instance’s ordinal +using the pattern: $(cluster.name)-$(component.name)-$(template.name)-$(ordinal). Ordinals start from 0. +The specified name overrides any default naming conventions or patterns. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of instances (Pods) to create from this InstanceTemplate. +This field allows setting how many replicated instances of the Component, +with the specific overrides in the InstanceTemplate, are created. +The default value is 1. A value of 0 disables instance creation. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs to be merged into the Pod’s existing annotations. +Existing keys will have their values overwritten, while new keys will be added to the annotations. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs that will be merged into the Pod’s existing labels. +Values for existing keys will be overwritten, and new keys will be added. +

+ +
+ +`image`
+ +string + + +
+ +(Optional) + +

+Specifies an override for the first container’s image in the Pod. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies an override for the resource requirements of the first container in the Pod. +This field allows for customizing resource allocation (CPU, memory, etc.) for the container. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Defines Env to override. +Add new or override existing envs. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+Defines Volumes to override. +Add new or override existing volumes. +

+ +
+ +`volumeMounts`
+ + +[]Kubernetes core/v1.VolumeMount + + + +
+ +(Optional) + +

+Defines VolumeMounts to override. +Add new or override existing volume mounts of the first container in the Pod. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Defines VolumeClaimTemplates to override. +Add new or override existing volume claim templates. +

+ +
+

+InstanceUpdateStrategy + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+ +

+InstanceUpdateStrategy indicates the strategy that the InstanceSet +controller will use to perform updates. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`partition`
+ +int32 + + +
+ +(Optional) + +

+Partition indicates the number of pods that should be updated during a rolling update. +The remaining pods will remain untouched. This is helpful in defining how many pods +should participate in the update process. The update process will follow the order +of pod names in descending lexicographical (dictionary) order. The default value is +ComponentSpec.Replicas (i.e., update all pods). +

+ +
+ +`maxUnavailable`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+The maximum number of pods that can be unavailable during the update. +Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). +Absolute number is calculated from percentage by rounding up. This can not be 0. +Defaults to 1. The field applies to all pods. That means if there is any unavailable pod, +it will be counted towards MaxUnavailable. +

+ +
+

+Issuer + +

+ +

+ +(Appears on:ClusterComponentSpec, TLSConfig) + +

+
+ +

+Issuer defines the TLS certificates issuer for the Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ + +IssuerName + + + +
+ + +

+The issuer for TLS certificates. +It only allows two enum values: `KubeBlocks` and `UserProvided`. +

+
    +
  • +`KubeBlocks` indicates that the self-signed TLS certificates generated by the KubeBlocks Operator will be used. +
  • +
  • +`UserProvided` means that the user is responsible for providing their own CA, Cert, and Key. +In this case, the user-provided CA certificate, server certificate, and private key will be used +for TLS communication. +
  • +
+ +
+ +`secretRef`
+ + +TLSSecretRef + + + +
+ +(Optional) + +

+SecretRef is the reference to the secret that contains user-provided certificates. +It is required when the issuer is set to `UserProvided`. +

+ +
+

+IssuerName +(`string` alias) +

+ +

+ +(Appears on:Issuer) + +

+
+ +

+IssuerName defines the name of the TLS certificates issuer. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"KubeBlocks" +

+
+ +

+IssuerKubeBlocks represents certificates that are signed by the KubeBlocks Operator. +

+ +
+ +

+"UserProvided" +

+
+ +

+IssuerUserProvided indicates that the user has provided their own CA-signed certificates. +

+ +
+

+LegacyRenderedTemplateSpec + +

+ +

+ +(Appears on:ComponentConfigSpec) + +

+
+ +

+LegacyRenderedTemplateSpec describes the configuration extension for the lazy rendered template. +Deprecated: LegacyRenderedTemplateSpec has been deprecated since 0.9.0 and will be removed in 0.10.0 +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ConfigTemplateExtension`
+ + +ConfigTemplateExtension + + + +
+ + +

+ +(Members of `ConfigTemplateExtension` are embedded into this type.) + +

+ +

+Extends the configuration template. +

+ +
+

+LetterCase +(`string` alias) +

+ +

+ +(Appears on:PasswordConfig) + +

+
+ +

+LetterCase defines the available cases to be used in password generation. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"LowerCases" +

+
+ +

+LowerCases represents the use of lower case letters only. +

+ +
+ +

+"MixedCases" +

+
+ +

+MixedCases represents the use of a mix of both lower and upper case letters. +

+ +
+ +

+"UpperCases" +

+
+ +

+UpperCases represents the use of upper case letters only. +

+ +
+

+LifecycleActionHandler + +

+ +

+ +(Appears on:ComponentLifecycleActions, RoleProbe) + +

+
+ +

+LifecycleActionHandler describes the implementation of a specific lifecycle action. +

+ +

+Each action is deemed successful if it returns an exit code of 0 for command executions, +or an HTTP 200 status for HTTP(s) actions. +Any other exit code or HTTP status is considered an indication of failure. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`builtinHandler`
+ + +BuiltinActionHandlerType + + + +
+ +(Optional) + +

+Specifies the name of the predefined action handler to be invoked for lifecycle actions. +

+ +

+Lorry, as a sidecar agent co-located with the database container in the same Pod, +includes a suite of built-in action implementations that are tailored to different database engines. +These are known as “builtin” handlers, including: `mysql`, `redis`, `mongodb`, `etcd`, +`postgresql`, `vanilla-postgresql`, `apecloud-postgresql`, `wesql`, `oceanbase`, `polardbx`. +

+ +

+If the `builtinHandler` field is specified, it instructs Lorry to utilize its internal built-in action handler +to execute the specified lifecycle actions. +

+ +

+The `builtinHandler` field is of type `BuiltinActionHandlerType`, +which represents the name of the built-in handler. +The `builtinHandler` specified within the same `ComponentLifecycleActions` should be consistent across all +actions. +This means that if you specify a built-in handler for one action, you should use the same handler +for all other actions throughout the entire `ComponentLifecycleActions` collection. +

+ +

+If you need to define lifecycle actions for database engines not covered by the existing built-in support, +or when the pre-existing built-in handlers do not meet your specific needs, +you can use the `customHandler` field to define your own action implementation. +

+ +

+Deprecation Notice: +

+
    +
  • +In the future, the `builtinHandler` field will be deprecated in favor of using the `customHandler` field +for configuring all lifecycle actions. +
  • +
  • +Instead of using a name to indicate the built-in action implementations in Lorry, +the recommended approach will be to explicitly invoke the desired action implementation through +a gRPC interface exposed by the sidecar agent. +
  • +
  • +Developers will have the flexibility to either use the built-in action implementations provided by Lorry +or develop their own sidecar agent to implement custom actions and expose them via gRPC interfaces. +
  • +
  • +This change will allow for greater customization and extensibility of lifecycle actions, +as developers can create their own “builtin” implementations tailored to their specific requirements. +
  • +
+ +
+ +`customHandler`
+ + +Action + + + +
+ +(Optional) + +

+Specifies a user-defined hook or procedure that is called to perform the specific lifecycle action. +It offers a flexible and expandable approach for customizing the behavior of a Component by leveraging +tailored actions. +

+ +

+An Action can be implemented as either an ExecAction or an HTTPAction, with future versions planning +to support GRPCAction, +thereby accommodating unique logic for different database systems within the Action’s framework. +

+ +

+In future iterations, all built-in handlers are expected to transition to GRPCAction. +This change means that Lorry or other sidecar agents will expose the implementation of actions +through a GRPC interface for external invocation. +Then the controller will interact with these actions via GRPCAction calls. +

+ +
+

+LogConfig + +

+ +

+ +(Appears on:ClusterComponentDefinition, ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies a descriptive label for the log type, such as ‘slow’ for a MySQL slow log file. +It provides a clear identification of the log’s purpose and content. +

+ +
+ +`filePathPattern`
+ +string + + +
+ + +

+Specifies the paths or patterns identifying where the log files are stored. +This field allows the system to locate and manage log files effectively. +

+ +

+Examples: +

+
    +
  • +/home/postgres/pgdata/pgroot/data/log/postgresql-* +
  • +
  • +/data/mysql/log/mysqld-error.log +
  • +
+ +
+

+MergedPolicy +(`string` alias) +

+ +

+ +(Appears on:ConfigTemplateExtension) + +

+
+ +

+MergedPolicy defines how to merge external imported templates into component templates. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"none" +

+
+ +
+ +

+"add" +

+
+ +
+ +

+"patch" +

+
+ +
+ +

+"replace" +

+
+ +
+

+MonitorConfig + +

+ +

+ +(Appears on:ClusterComponentDefinition, ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`builtIn`
+ +bool + + +
+ +(Optional) + +

+builtIn is a switch to enable KubeBlocks builtIn monitoring. +If BuiltIn is set to true, monitor metrics will be scraped automatically. +If BuiltIn is set to false, the provider must supply the ExporterConfig and the Sidecar container on their own. +

+ +
+ +`exporterConfig`
+ + +ExporterConfig + + + +
+ +(Optional) + +

+exporterConfig is provided by the provider, and specifies the necessary information for the Time Series Database. +exporterConfig is valid only when builtIn is false. +

+ +
+

+MultipleClusterObjectCombinedOption + +

+ +

+ +(Appears on:MultipleClusterObjectOption) + +

+
+ +

+MultipleClusterObjectCombinedOption defines options for handling combined variables. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`newVarSuffix`
+ +string + + +
+ +(Optional) + +

+If set, the existing variable will be kept, and a new variable will be defined with the specified suffix +in pattern: $(var.name)_$(suffix). +The new variable will be auto-created and placed behind the existing one. +If not set, the existing variable will be reused with the value format defined below. +

+ +
+ +`valueFormat`
+ + +MultipleClusterObjectValueFormat + + + +
+ +(Optional) + +

+The format of the value that the operator will use to compose values from multiple components. +

+ +
+ +`flattenFormat`
+ + +MultipleClusterObjectValueFormatFlatten + + + +
+ +(Optional) + +

+The flatten format, default is: $(comp-name-1):value,$(comp-name-2):value. +

+ +
+

+MultipleClusterObjectOption + +

+ +

+ +(Appears on:ClusterObjectReference) + +

+
+ +

+MultipleClusterObjectOption defines the options for handling multiple cluster objects matched. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`requireAllComponentObjects`
+ +bool + + +
+ +(Optional) + +

+RequireAllComponentObjects controls whether all component objects must exist before resolving. +If set to true, resolving will only proceed if all component objects are present. +

+ +
+ +`strategy`
+ + +MultipleClusterObjectStrategy + + + +
+ + +

+Define the strategy for handling multiple cluster objects. +

+ +
+ +`combinedOption`
+ + +MultipleClusterObjectCombinedOption + + + +
+ +(Optional) + +

+Define the options for handling combined variables. +Valid only when the strategy is set to “combined”. +

+ +
+

+MultipleClusterObjectStrategy +(`string` alias) +

+ +

+ +(Appears on:MultipleClusterObjectOption) + +

+
+ +

+MultipleClusterObjectStrategy defines the strategy for handling multiple cluster objects. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"combined" +

+
+ +

+MultipleClusterObjectStrategyCombined - the values from all matched components will be combined into a single +variable using the specified option. +

+ +
+ +

+"individual" +

+
+ +

+MultipleClusterObjectStrategyIndividual - each matched component will have its individual variable with its name +as the suffix. +This is required when referencing credential variables that cannot be passed by values. +

+ +
+

+MultipleClusterObjectValueFormat +(`string` alias) +

+ +

+ +(Appears on:MultipleClusterObjectCombinedOption) + +

+
+ +

+MultipleClusterObjectValueFormat defines the format details for the value. +

+
+ + + + + + + + + + + + + + +
ValueDescription
+ +

+"Flatten" +

+
+ +
+

+MultipleClusterObjectValueFormatFlatten + +

+ +

+ +(Appears on:MultipleClusterObjectCombinedOption) + +

+
+ +

+MultipleClusterObjectValueFormatFlatten defines the flatten format for the value. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`delimiter`
+ +string + + +
+ + +

+Pair delimiter. +

+ +
+ +`keyValueDelimiter`
+ +string + + +
+ + +

+Key-value delimiter. +

+ +
+

+NamedVar + +

+ +

+ +(Appears on:ContainerVars, ServiceVars) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +
+ +`option`
+ + +VarOption + + + +
+ +(Optional) + +
+

+PasswordConfig + +

+ +

+ +(Appears on:ComponentSystemAccount, SystemAccount, SystemAccountSpec) + +

+
+ +

+PasswordConfig helps customize the complexity of the password generation pattern. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`length`
+ +int32 + + +
+ +(Optional) + +

+The length of the password. +

+ +
+ +`numDigits`
+ +int32 + + +
+ +(Optional) + +

+The number of digits in the password. +

+ +
+ +`numSymbols`
+ +int32 + + +
+ +(Optional) + +

+The number of symbols in the password. +

+ +
+ +`letterCase`
+ + +LetterCase + + + +
+ +(Optional) + +

+The case of the letters in the password. +

+ +
+ +`seed`
+ +string + + +
+ +(Optional) + +

+Seed to generate the account’s password. +Cannot be updated. +

+ +
+

+Payload + +

+ +

+ +(Appears on:ConfigurationItemDetail) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`-`
+ +map[string]any + + +
+ +(Optional) + +

+Holds the payload data. This field is optional and can contain any type of data. +Not included in the JSON representation of the object. +

+ +
+

+PersistentVolumeClaimSpec + +

+ +

+ +(Appears on:ClusterComponentVolumeClaimTemplate) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`accessModes`
+ + +[]Kubernetes core/v1.PersistentVolumeAccessMode + + + +
+ +(Optional) + +

+Contains the desired access modes the volume should have. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.VolumeResourceRequirements + + + +
+ +(Optional) + +

+Represents the minimum resources the volume should have. +If the RecoverVolumeExpansionFailure feature is enabled, users are allowed to specify resource requirements that +are lower than the previous value but must still be higher than the capacity recorded in the status field of the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources. +

+ +
+ +`storageClassName`
+ +string + + +
+ +(Optional) + +

+The name of the StorageClass required by the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. +

+ +
+ +`volumeMode`
+ + +Kubernetes core/v1.PersistentVolumeMode + + + +
+ +(Optional) + +

+Defines what type of volume is required by the claim, either Block or Filesystem. +

+ +
+

+Phase +(`string` alias) +

+ +

+ +(Appears on:ClusterDefinitionStatus, ComponentDefinitionStatus, ComponentVersionStatus, ServiceDescriptorStatus) + +

+
+ +

+Phase represents the current status of the ClusterDefinition CR. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +

+AvailablePhase indicates that the object is in an available state. +

+ +
+ +

+"Unavailable" +

+
+ +

+UnavailablePhase indicates that the object is in an unavailable state. +

+ +
+

+PodAntiAffinity +(`string` alias) +

+ +

+ +(Appears on:Affinity) + +

+
+ +

+PodAntiAffinity defines the pod anti-affinity strategy. +

+ +

+This strategy determines how pods are scheduled in relation to other pods, with the aim of either spreading pods +across nodes (Preferred) or ensuring that certain pods do not share a node (Required). +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Preferred" +

+
+ +

+Preferred indicates that the scheduler will try to enforce the anti-affinity rules, but it will not guarantee it. +

+ +
+ +

+"Required" +

+
+ +

+Required indicates that the scheduler must enforce the anti-affinity rules and will not schedule the pods unless +the rules are met. +

+ +
+

+PodAvailabilityPolicy +(`string` alias) +

+
+ +

+PodAvailabilityPolicy defines the pod availability strategy. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +
+ +

+"None" +

+
+ +
+ +

+"UnAvailable" +

+
+ +
+

+PostStartAction + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+PostStartAction is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cmdExecutorConfig`
+ + +CmdExecutorConfig + + + +
+ + +

+Specifies the post-start command to be executed. +

+ +
+ +`scriptSpecSelectors`
+ + +[]ScriptSpecSelector + + + +
+ +(Optional) + +

+Used to select the scripts that need to be referenced. +When defined, the scripts defined in scriptSpecs can be referenced within the CmdExecutorConfig. +

+ +
+

+PreConditionType +(`string` alias) +

+ +

+ +(Appears on:Action) + +

+
+ +

+PreConditionType defines the preCondition type of the action execution. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"ClusterReady" +

+
+ +
+ +

+"ComponentReady" +

+
+ +
+ +

+"Immediately" +

+
+ +
+ +

+"RuntimeReady" +

+
+ +
+

+Probe + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`Action`
+ + +Action + + + +
+ + +

+ +(Members of `Action` are embedded into this type.) + +

+ +
+ +`initialDelaySeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds to wait after the container has started before the RoleProbe +begins to detect the container’s role. +

+ +
+ +`periodSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the frequency at which the probe is conducted. This value is expressed in seconds. +Default to 10 seconds. Minimum value is 1. +

+ +
+ +`successThreshold`
+ +int32 + + +
+ +(Optional) + +

+Minimum consecutive successes for the probe to be considered successful after having failed. +Defaults to 1. Minimum value is 1. +

+ +
+ +`failureThreshold`
+ +int32 + + +
+ +(Optional) + +

+Minimum consecutive failures for the probe to be considered failed after having succeeded. +Defaults to 3. Minimum value is 1. +

+ +
+

+PrometheusScheme +(`string` alias) +

+ +

+ +(Appears on:Exporter) + +

+
+ +

+PrometheusScheme defines the protocol of prometheus scrape metrics. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"http" +

+
+ +
+ +

+"https" +

+
+ +
+

+ProtectedVolume + +

+ +

+ +(Appears on:VolumeProtectionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+The Name of the volume to protect. +

+ +
+ +`highWatermark`
+ +int + + +
+ +(Optional) + +

+Defines the high watermark threshold for the volume, it will override the component level threshold. +If the value is invalid, it will be ignored and the component level threshold will be used. +

+ +
+

+ProvisionPolicy + +

+ +

+ +(Appears on:SystemAccountConfig) + +

+
+ +

+ProvisionPolicy defines the policy details for creating accounts. +

+ +

+Deprecated since v0.9. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`type`
+ + +ProvisionPolicyType + + + +
+ + +

+Specifies the method to provision an account. +

+ +
+ +`scope`
+ + +ProvisionScope + + + +
+ + +

+Defines the scope within which the account is provisioned. +

+ +
+ +`statements`
+ + +ProvisionStatements + + + +
+ +(Optional) + +

+The statement to provision an account. +

+ +
+ +`secretRef`
+ + +ProvisionSecretRef + + + +
+ +(Optional) + +

+The external secret to refer. +

+ +
+

+ProvisionPolicyType +(`string` alias) +

+ +

+ +(Appears on:ProvisionPolicy) + +

+
+ +

+ProvisionPolicyType defines the policy for creating accounts. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"CreateByStmt" +

+
+ +

+CreateByStmt will create an account according to the deletion and creation statements given by the provider. +

+ +
+ +

+"ReferToExisting" +

+
+ +

+ReferToExisting will not create an account, but will create a secret by copying data from the referred secret. +

+ +
+

+ProvisionScope +(`string` alias) +

+ +

+ +(Appears on:ProvisionPolicy) + +

+
+ +

+ProvisionScope defines the scope of provision within a component. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"AllPods" +

+
+ +

+AllPods indicates that accounts will be created for all pods within the component. +

+ +
+ +

+"AnyPods" +

+
+ +

+AnyPods indicates that accounts will be created only on a single pod within the component. +

+ +
+

+ProvisionSecretRef + +

+ +

+ +(Appears on:ComponentSystemAccount, ProvisionPolicy, SystemAccount) + +

+
+ +

+ProvisionSecretRef represents the reference to a secret. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The unique identifier of the secret. +

+ +
+ +`namespace`
+ +string + + +
+ + +

+The namespace where the secret is located. +

+ +
+

+ProvisionStatements + +

+ +

+ +(Appears on:ProvisionPolicy) + +

+
+ +

+ProvisionStatements defines the statements used to create accounts. +

+ +

+Deprecated since v0.9. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`creation`
+ +string + + +
+ + +

+Specifies the statement required to create a new account with the necessary privileges. +

+ +
+ +`update`
+ +string + + +
+ +(Optional) + +

+Defines the statement required to update the password of an existing account. +

+ +
+ +`deletion`
+ +string + + +
+ +(Optional) + +

+Defines the statement required to delete an existing account. +Typically used in conjunction with the creation statement to delete an account before recreating it. +For example, one might use a `drop user if exists` statement followed by a `create user` statement to ensure a fresh account. +

+ +

+Deprecated: This field is deprecated and the update statement should be used instead. +

+ +
+

+RSMSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+RSMSpec is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+Specifies a list of roles defined within the system. +

+ +
+ +`roleProbe`
+ + +RoleProbe + + + +
+ +(Optional) + +

+Defines the method used to probe a role. +

+ +
+ +`membershipReconfiguration`
+ + +MembershipReconfiguration + + + +
+ +(Optional) + +

+Indicates the actions required for dynamic membership reconfiguration. +

+ +
+ +`memberUpdateStrategy`
+ + +MemberUpdateStrategy + + + +
+ +(Optional) + +

+Describes the strategy for updating Members (Pods). +

+
    +
  • +`Serial`: Updates Members sequentially to ensure minimum component downtime. +
  • +
  • +`BestEffortParallel`: Updates Members in parallel to ensure minimum component write downtime. +
  • +
  • +`Parallel`: Forces parallel updates. +
  • +
+ +
+

+ReconcileDetail + +

+ +

+ +(Appears on:ConfigurationItemDetailStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`policy`
+ +string + + +
+ +(Optional) + +

+Represents the policy applied during the most recent execution. +

+ +
+ +`execResult`
+ +string + + +
+ +(Optional) + +

+Represents the outcome of the most recent execution. +

+ +
+ +`currentRevision`
+ +string + + +
+ +(Optional) + +

+Represents the current revision of the configuration item. +

+ +
+ +`succeedCount`
+ +int32 + + +
+ +(Optional) + +

+Represents the number of pods where configuration changes were successfully applied. +

+ +
+ +`expectedCount`
+ +int32 + + +
+ +(Optional) + +

+Represents the total number of pods that require execution of configuration changes. +

+ +
+ +`errMessage`
+ +string + + +
+ +(Optional) + +

+Represents the error message generated when the execution of configuration changes fails. +

+ +
+

+ReloadOptions + +

+ +

+ +(Appears on:ConfigConstraintSpec) + +

+
+ +

+ReloadOptions defines the mechanisms available for dynamically reloading a process within K8s without requiring a restart. +

+ +

+Only one of the mechanisms can be specified at a time. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`unixSignalTrigger`
+ + +UnixSignalTrigger + + + +
+ +(Optional) + +

+Used to trigger a reload by sending a specific Unix signal to the process. +

+ +
+ +`shellTrigger`
+ + +ShellTrigger + + + +
+ +(Optional) + +

+Allows to execute a custom shell script to reload the process. +

+ +
+ +`tplScriptTrigger`
+ + +TPLScriptTrigger + + + +
+ +(Optional) + +

+Enables reloading process using a Go template script. +

+ +
+ +`autoTrigger`
+ + +AutoTrigger + + + +
+ +(Optional) + +

+Automatically perform the reload when specified conditions are met. +

+ +
+

+ReplicaRole + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ReplicaRole represents a role that can be assumed by a component instance. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the role’s identifier. It is used to set the “apps.kubeblocks.io/role” label value +on the corresponding object. +

+ +

+This field is immutable once set. +

+ +
+ +`serviceable`
+ +bool + + +
+ +(Optional) + +

+Indicates whether a replica assigned this role is capable of providing services. +

+ +

+This field is immutable once set. +

+ +
+ +`writable`
+ +bool + + +
+ +(Optional) + +

+Determines if a replica in this role has the authority to perform write operations. +A writable replica can modify data, handle update operations. +

+ +

+This field is immutable once set. +

+ +
+ +`votable`
+ +bool + + +
+ +(Optional) + +

+Specifies whether a replica with this role has voting rights. +In distributed systems, this typically means the replica can participate in consensus decisions, +configuration changes, or other processes that require a quorum. +

+ +

+This field is immutable once set. +

+ +
+

+ReplicasLimit + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ReplicasLimit defines the valid range of number of replicas supported. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`minReplicas`
+ +int32 + + +
+ + +

+The minimum limit of replicas. +

+ +
+ +`maxReplicas`
+ +int32 + + +
+ + +

+The maximum limit of replicas. +

+ +
+

+ReplicationSetSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+ReplicationSetSpec is deprecated since v0.7. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`StatefulSetSpec`
+ + +StatefulSetSpec + + + +
+ + +

+ +(Members of `StatefulSetSpec` are embedded into this type.) + +

+ +
+

+RerenderResourceType +(`string` alias) +

+ +

+ +(Appears on:ComponentConfigSpec) + +

+
+ +

+RerenderResourceType defines the types of resource changes that trigger a re-render of the configuration for a component. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"hscale" +

+
+ +
+ +

+"vscale" +

+
+ +
+ +

+"shardingHScale" +

+
+ +
+

+ResourceMeta + +

+ +

+ +(Appears on:ConfigMapRef, SecretRef) + +

+
+ +

+ResourceMeta encapsulates metadata and configuration for referencing ConfigMaps and Secrets as volumes. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name is the name of the referenced ConfigMap or Secret object. It must conform to DNS label standards. +

+ +
+ +`mountPoint`
+ +string + + +
+ + +

+MountPoint is the filesystem path where the volume will be mounted. +

+ +
+ +`subPath`
+ +string + + +
+ +(Optional) + +

+SubPath specifies a path within the volume from which to mount. +

+ +
+ +`asVolumeFrom`
+ +[]string + + +
+ +(Optional) + +

+AsVolumeFrom lists the names of containers in which the volume should be mounted. +

+ +
+

+RetryPolicy + +

+ +

+ +(Appears on:Action) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`maxRetries`
+ +int + + +
+ +(Optional) + +

+Defines the maximum number of retry attempts that should be made for a given Action. +This value is set to 0 by default, indicating that no retries will be made. +

+ +
+ +`retryInterval`
+ +time.Duration + + +
+ +(Optional) + +

+Indicates the duration of time to wait between each retry attempt. +This value is set to 0 by default, indicating that there will be no delay between retry attempts. +

+ +
+

+RoleArbitrator +(`string` alias) +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+RoleArbitrator defines how to arbitrate the role of replicas. +

+ +

+Deprecated since v0.9 +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"External" +

+
+ +
+ +

+"Lorry" +

+
+ +
+

+RoleProbe + +

+ +

+ +(Appears on:ComponentLifecycleActions) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`LifecycleActionHandler`
+ + +LifecycleActionHandler + + + +
+ + +

+ +(Members of `LifecycleActionHandler` are embedded into this type.) + +

+ +
+ +`initialDelaySeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds to wait after the container has started before the RoleProbe +begins to detect the container’s role. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds after which the probe times out. +Defaults to 1 second. Minimum value is 1. +

+ +
+ +`periodSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the frequency at which the probe is conducted. This value is expressed in seconds. +Default to 10 seconds. Minimum value is 1. +

+ +
+

+SchedulingPolicy + +

+ +

+ +(Appears on:ClusterComponentSpec, ClusterSpec, ComponentSpec, InstanceTemplate) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`schedulerName`
+ +string + + +
+ +(Optional) + +

+If specified, the Pod will be dispatched by specified scheduler. +If not specified, the Pod will be dispatched by default scheduler. +

+ +
+ +`nodeSelector`
+ +map[string]string + + +
+ +(Optional) + +

+NodeSelector is a selector which must be true for the Pod to fit on a node. +Selector which must match a node’s labels for the Pod to be scheduled on that node. +More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +

+ +
+ +`nodeName`
+ +string + + +
+ +(Optional) + +

+NodeName is a request to schedule this Pod onto a specific node. If it is non-empty, +the scheduler simply schedules this Pod onto that node, assuming that it fits resource +requirements. +

+ +
+ +`affinity`
+ + +Kubernetes core/v1.Affinity + + + +
+ +(Optional) + +

+Specifies a group of affinity scheduling rules of the Cluster, including NodeAffinity, PodAffinity, and PodAntiAffinity. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Allows Pods to be scheduled onto nodes with matching taints. +Each toleration in the array allows the Pod to tolerate node taints based on +specified `key`, `value`, `effect`, and `operator`. +

+
    +
  • +The `key`, `value`, and `effect` identify the taint that the toleration matches. +
  • +
  • +The `operator` determines how the toleration matches the taint. +
  • +
+ +

+Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. +

+ +
+ +`topologySpreadConstraints`
+ + +[]Kubernetes core/v1.TopologySpreadConstraint + + + +
+ +(Optional) + +

+TopologySpreadConstraints describes how a group of Pods ought to spread across topology +domains. Scheduler will schedule Pods in a way which abides by the constraints. +All topologySpreadConstraints are ANDed. +

+ +
+

+ScriptSpecSelector + +

+ +

+ +(Appears on:ComponentSwitchover, PostStartAction, SwitchoverAction) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Represents the name of the ScriptSpec referent. +

+ +
+

+SecretRef + +

+ +

+ +(Appears on:UserResourceRefs) + +

+
+ +

+SecretRef defines a reference to a Secret. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ResourceMeta`
+ + +ResourceMeta + + + +
+ + +

+ +(Members of `ResourceMeta` are embedded into this type.) + +

+ +
+ +`secret`
+ + +Kubernetes core/v1.SecretVolumeSource + + + +
+ + +

+Secret specifies the Secret to be mounted as a volume. +

+ +
+

+Service + +

+ +

+ +(Appears on:ClusterService, ComponentService) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name defines the name of the service. +Others can refer to this service by its name (e.g., connection credential). +Cannot be updated. +

+ +
+ +`serviceName`
+ +string + + +
+ +(Optional) + +

+ServiceName defines the name of the underlying service object. +If not specified, the default service name with different patterns will be used: +

+
    +
  • +CLUSTER_NAME: for cluster-level services +
  • +
  • +CLUSTER_NAME-COMPONENT_NAME: for component-level services +
  • +
+ +

+Only one default service name is allowed. +Cannot be updated. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+If ServiceType is LoadBalancer, cloud provider related parameters can be put here +More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer. +

+ +
+ +`spec`
+ + +Kubernetes core/v1.ServiceSpec + + + +
+ +(Optional) + +

+Spec defines the behavior of a service. +https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`ports`
+ + +[]Kubernetes core/v1.ServicePort + + + +
+ + +

+The list of ports that are exposed by this service. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`selector`
+ +map[string]string + + +
+ +(Optional) + +

+Route service traffic to pods with label keys and values matching this +selector. If empty or not present, the service is assumed to have an +external process managing its endpoints, which Kubernetes will not +modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. +Ignored if type is ExternalName. +More info: https://kubernetes.io/docs/concepts/services-networking/service/ +

+ +
+ +`clusterIP`
+ +string + + +
+ +(Optional) + +

+clusterIP is the IP address of the service and is usually assigned +randomly. If an address is specified manually, is in-range (as per +system configuration), and is not in use, it will be allocated to the +service; otherwise creation of the service will fail. This field may not +be changed through updates unless the type field is also being changed +to ExternalName (which requires this field to be blank) or the type +field is being changed from ExternalName (in which case this field may +optionally be specified, as describe above). Valid values are “None”, +empty string (“”), or a valid IP address. Setting this to “None” makes a +“headless service” (no virtual IP), which is useful when direct endpoint +connections are preferred and proxying is not required. Only applies to +types ClusterIP, NodePort, and LoadBalancer. If this field is specified +when creating a Service of type ExternalName, creation will fail. This +field will be wiped when updating a Service to type ExternalName. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`clusterIPs`
+ +[]string + + +
+ +(Optional) + +

+ClusterIPs is a list of IP addresses assigned to this service, and are +usually assigned randomly. If an address is specified manually, is +in-range (as per system configuration), and is not in use, it will be +allocated to the service; otherwise creation of the service will fail. +This field may not be changed through updates unless the type field is +also being changed to ExternalName (which requires this field to be +empty) or the type field is being changed from ExternalName (in which +case this field may optionally be specified, as describe above). Valid +values are “None”, empty string (“”), or a valid IP address. Setting +this to “None” makes a “headless service” (no virtual IP), which is +useful when direct endpoint connections are preferred and proxying is +not required. Only applies to types ClusterIP, NodePort, and +LoadBalancer. If this field is specified when creating a Service of type +ExternalName, creation will fail. This field will be wiped when updating +a Service to type ExternalName. If this field is not specified, it will +be initialized from the clusterIP field. If this field is specified, +clients must ensure that clusterIPs[0] and clusterIP have the same +value. +

+ +

+This field may hold a maximum of two entries (dual-stack IPs, in either order). +These IPs must correspond to the values of the ipFamilies field. Both +clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`type`
+ + +Kubernetes core/v1.ServiceType + + + +
+ +(Optional) + +

+type determines how the Service is exposed. Defaults to ClusterIP. Valid +options are ExternalName, ClusterIP, NodePort, and LoadBalancer. +“ClusterIP” allocates a cluster-internal IP address for load-balancing +to endpoints. Endpoints are determined by the selector or if that is not +specified, by manual construction of an Endpoints object or +EndpointSlice objects. If clusterIP is “None”, no virtual IP is +allocated and the endpoints are published as a set of endpoints rather +than a virtual IP. +“NodePort” builds on ClusterIP and allocates a port on every node which +routes to the same endpoints as the clusterIP. +“LoadBalancer” builds on NodePort and creates an external load-balancer +(if supported in the current cloud) which routes to the same endpoints +as the clusterIP. +“ExternalName” aliases this service to the specified externalName. +Several other fields do not apply to ExternalName services. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types +

+ +
+ +`externalIPs`
+ +[]string + + +
+ +(Optional) + +

+externalIPs is a list of IP addresses for which nodes in the cluster +will also accept traffic for this service. These IPs are not managed by +Kubernetes. The user is responsible for ensuring that traffic arrives +at a node with this IP. A common example is external load-balancers +that are not part of the Kubernetes system. +

+ +
+ +`sessionAffinity`
+ + +Kubernetes core/v1.ServiceAffinity + + + +
+ +(Optional) + +

+Supports “ClientIP” and “None”. Used to maintain session affinity. +Enable client IP based session affinity. +Must be ClientIP or None. +Defaults to None. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`loadBalancerIP`
+ +string + + +
+ +(Optional) + +

+Only applies to Service Type: LoadBalancer. +This feature depends on whether the underlying cloud-provider supports specifying +the loadBalancerIP when a load balancer is created. +This field will be ignored if the cloud-provider does not support the feature. +Deprecated: This field was under-specified and its meaning varies across implementations. +Using it is non-portable and it may not support dual-stack. +Users are encouraged to use implementation-specific annotations when available. +

+ +
+ +`loadBalancerSourceRanges`
+ +[]string + + +
+ +(Optional) + +

+If specified and supported by the platform, traffic through the cloud-provider +load-balancer will be restricted to the specified client IPs. This field will be ignored if the +cloud-provider does not support the feature. +More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ +

+ +
+ +`externalName`
+ +string + + +
+ +(Optional) + +

+externalName is the external reference that discovery mechanisms will +return as an alias for this service (e.g. a DNS CNAME record). No +proxying will be involved. Must be a lowercase RFC-1123 hostname +(https://tools.ietf.org/html/rfc1123) and requires `type` to be “ExternalName”. +

+ +
+ +`externalTrafficPolicy`
+ + +Kubernetes core/v1.ServiceExternalTrafficPolicy + + + +
+ +(Optional) + +

+externalTrafficPolicy describes how nodes distribute service traffic they +receive on one of the Service’s “externally-facing” addresses (NodePorts, +ExternalIPs, and LoadBalancer IPs). If set to “Local”, the proxy will configure +the service in a way that assumes that external load balancers will take care +of balancing the service traffic between nodes, and so each node will deliver +traffic only to the node-local endpoints of the service, without masquerading +the client source IP. (Traffic mistakenly sent to a node with no endpoints will +be dropped.) The default value, “Cluster”, uses the standard behavior of +routing to all endpoints evenly (possibly modified by topology and other +features). Note that traffic sent to an External IP or LoadBalancer IP from +within the cluster will always get “Cluster” semantics, but clients sending to +a NodePort from within the cluster may need to take traffic policy into account +when picking a node. +

+ +
+ +`healthCheckNodePort`
+ +int32 + + +
+ +(Optional) + +

+healthCheckNodePort specifies the healthcheck nodePort for the service. +This only applies when type is set to LoadBalancer and +externalTrafficPolicy is set to Local. If a value is specified, is +in-range, and is not in use, it will be used. If not specified, a value +will be automatically allocated. External systems (e.g. load-balancers) +can use this port to determine if a given node holds endpoints for this +service or not. If this field is specified when creating a Service +which does not need it, creation will fail. This field will be wiped +when updating a Service to no longer need it (e.g. changing type). +This field cannot be updated once set. +

+ +
+ +`publishNotReadyAddresses`
+ +bool + + +
+ +(Optional) + +

+publishNotReadyAddresses indicates that any agent which deals with endpoints for this +Service should disregard any indications of ready/not-ready. +The primary use case for setting this field is for a StatefulSet’s Headless Service to +propagate SRV DNS records for its Pods for the purpose of peer discovery. +The Kubernetes controllers that generate Endpoints and EndpointSlice resources for +Services interpret this to mean that all endpoints are considered “ready” even if the +Pods themselves are not. Agents which consume only Kubernetes generated endpoints +through the Endpoints or EndpointSlice resources can safely assume this behavior. +

+ +
+ +`sessionAffinityConfig`
+ + +Kubernetes core/v1.SessionAffinityConfig + + + +
+ +(Optional) + +

+sessionAffinityConfig contains the configurations of session affinity. +

+ +
+ +`ipFamilies`
+ + +[]Kubernetes core/v1.IPFamily + + + +
+ +(Optional) + +

+IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this +service. This field is usually assigned automatically based on cluster +configuration and the ipFamilyPolicy field. If this field is specified +manually, the requested family is available in the cluster, +and ipFamilyPolicy allows it, it will be used; otherwise creation of +the service will fail. This field is conditionally mutable: it allows +for adding or removing a secondary IP family, but it does not allow +changing the primary IP family of the Service. Valid values are “IPv4” +and “IPv6”. This field only applies to Services of types ClusterIP, +NodePort, and LoadBalancer, and does apply to “headless” services. +This field will be wiped when updating a Service to type ExternalName. +

+ +

+This field may hold a maximum of two entries (dual-stack families, in +either order). These families must correspond to the values of the +clusterIPs field, if specified. Both clusterIPs and ipFamilies are +governed by the ipFamilyPolicy field. +

+ +
+ +`ipFamilyPolicy`
+ + +Kubernetes core/v1.IPFamilyPolicy + + + +
+ +(Optional) + +

+IPFamilyPolicy represents the dual-stack-ness requested or required by +this Service. If there is no value provided, then this field will be set +to SingleStack. Services can be “SingleStack” (a single IP family), +“PreferDualStack” (two IP families on dual-stack configured clusters or +a single IP family on single-stack clusters), or “RequireDualStack” +(two IP families on dual-stack configured clusters, otherwise fail). The +ipFamilies and clusterIPs fields depend on the value of this field. This +field will be wiped when updating a service to type ExternalName. +

+ +
+ +`allocateLoadBalancerNodePorts`
+ +bool + + +
+ +(Optional) + +

+allocateLoadBalancerNodePorts defines if NodePorts will be automatically +allocated for services with type LoadBalancer. Default is “true”. It +may be set to “false” if the cluster load-balancer does not rely on +NodePorts. If the caller requests specific NodePorts (by specifying a +value), those requests will be respected, regardless of this field. +This field may only be set for services with type LoadBalancer and will +be cleared if the type is changed to any other type. +

+ +
+ +`loadBalancerClass`
+ +string + + +
+ +(Optional) + +

+loadBalancerClass is the class of the load balancer implementation this Service belongs to. +If specified, the value of this field must be a label-style identifier, with an optional prefix, +e.g. “internal-vip” or “example.com/internal-vip”. Unprefixed names are reserved for end-users. +This field can only be set when the Service type is ‘LoadBalancer’. If not set, the default load +balancer implementation is used, today this is typically done through the cloud provider integration, +but should apply for any default implementation. If set, it is assumed that a load balancer +implementation is watching for Services with a matching class. Any default load balancer +implementation (e.g. cloud providers) should ignore Services that set this field. +This field can only be set when creating or updating a Service to type ‘LoadBalancer’. +Once set, it can not be changed. This field will be wiped when a service is updated to a non ‘LoadBalancer’ type. +

+ +
+ +`internalTrafficPolicy`
+ + +Kubernetes core/v1.ServiceInternalTrafficPolicy + + + +
+ +(Optional) + +

+InternalTrafficPolicy describes how nodes distribute service traffic they +receive on the ClusterIP. If set to “Local”, the proxy will assume that pods +only want to talk to endpoints of the service on the same node as the pod, +dropping the traffic if there are no local endpoints. The default value, +“Cluster”, uses the standard behavior of routing to all endpoints evenly +(possibly modified by topology and other features). +

+ +
+ +
+ +`roleSelector`
+ +string + + +
+ +(Optional) + +

+Extends the above `serviceSpec.selector` by allowing you to specify defined role as selector for the service. +When `roleSelector` is set, it adds a label selector “kubeblocks.io/role: {roleSelector}” +to the `serviceSpec.selector`. +Example usage: +

+
+
+  roleSelector: "leader"
+
+
+ +

+In this example, setting `roleSelector` to “leader” will add a label selector +“kubeblocks.io/role: leader” to the `serviceSpec.selector`. +This means that the service will select and route traffic to Pods with the label +“kubeblocks.io/role” set to “leader”. +

+ +

+Note that if `podService` is set to true, `roleSelector` will be ignored. +The `podService` flag takes precedence over `roleSelector` and generates a service for each Pod. +

+ +
+

+ServiceDescriptorSpec + +

+ +

+ +(Appears on:ServiceDescriptor) + +

+
+ +

+ServiceDescriptorSpec defines the desired state of ServiceDescriptor. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceKind`
+ +string + + +
+ + +

+Describes the type of database service provided by the external service. +For example, “mysql”, “redis”, “mongodb”. +This field categorizes databases by their functionality, protocol and compatibility, facilitating appropriate +service integration based on their unique capabilities. +

+ +

+This field is case-insensitive. +

+ +

+It also supports abbreviations for some well-known databases: +- “pg”, “pgsql”, “postgres”, “postgresql”: PostgreSQL service +- “zk”, “zookeeper”: ZooKeeper service +- “es”, “elasticsearch”: Elasticsearch service +- “mongo”, “mongodb”: MongoDB service +- “ch”, “clickhouse”: ClickHouse service +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+Describes the version of the service provided by the external service. +This is crucial for ensuring compatibility between different components of the system, +as different versions of a service may have varying features. +

+ +
+ +`endpoint`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the endpoint of the external service. +

+ +

+If the service is exposed via a cluster, the endpoint will be provided in the format of `host:port`. +

+ +
+ +`host`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the service or IP address of the external service. +

+ +
+ +`port`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the port of the external service. +

+ +
+ +`auth`
+ + +ConnectionCredentialAuth + + + +
+ +(Optional) + +

+Specifies the authentication credentials required for accessing an external service. +

+ +
+

+ServiceDescriptorStatus + +

+ +

+ +(Appears on:ServiceDescriptor) + +

+
+ +

+ServiceDescriptorStatus defines the observed state of ServiceDescriptor +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Indicates the current lifecycle phase of the ServiceDescriptor. This can be either ‘Available’ or ‘Unavailable’. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a human-readable explanation detailing the reason for the current phase of the ServiceDescriptor. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the generation number that has been processed by the controller. +

+ +
+

+ServicePort + +

+ +

+ +(Appears on:ServiceSpec) + +

+
+ +

+ServicePort is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of this port within the service. This must be a DNS_LABEL. +All ports within a ServiceSpec must have unique names. When considering +the endpoints for a Service, this must match the ‘name’ field in the +EndpointPort. +

+ +
+ +`protocol`
+ + +Kubernetes core/v1.Protocol + + + +
+ +(Optional) + +

+The IP protocol for this port. Supports “TCP”, “UDP”, and “SCTP”. +Default is TCP. +

+ +
+ +`appProtocol`
+ +string + + +
+ +(Optional) + +

+The application protocol for this port. +This field follows standard Kubernetes label syntax. +Un-prefixed names are reserved for IANA standard service names (as per +RFC-6335 and https://www.iana.org/assignments/service-names). +Non-standard protocols should use prefixed names such as +mycompany.com/my-custom-protocol. +

+ +
+ +`port`
+ +int32 + + +
+ + +

+The port that will be exposed by this service. +

+ +
+ +`targetPort`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Number or name of the port to access on the pods targeted by the service. +

+ +

+Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. +

+
    +
  • +If this is a string, it will be looked up as a named port in the target Pod’s container ports. +
  • +
  • +If this is not specified, the value of the `port` field is used (an identity map). +
  • +
+ +

+This field is ignored for services with clusterIP=None, and should be +omitted or set equal to the `port` field. +

+ +

+More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service +

+ +
+

+ServiceRef + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the identifier of the service reference declaration. +It corresponds to the serviceRefDeclaration name defined in either: +

+
    +
  • +`componentDefinition.spec.serviceRefDeclarations[*].name` +
  • +
  • +`clusterDefinition.spec.componentDefs[*].serviceRefDeclarations[*].name` (deprecated) +
  • +
+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the referenced Cluster or the namespace of the referenced ServiceDescriptor object. +If not provided, the referenced Cluster and ServiceDescriptor will be searched in the namespace of the current +Cluster by default. +

+ +
+ +`cluster`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the KubeBlocks Cluster being referenced. +This is used when services from another KubeBlocks Cluster are consumed. +

+ +

+By default, the referenced KubeBlocks Cluster’s `clusterDefinition.spec.connectionCredential` +will be utilized to bind to the current Component. This credential should include: +`endpoint`, `port`, `username`, and `password`. +

+ +

+Note: +

+
    +
  • +The `ServiceKind` and `ServiceVersion` specified in the service reference within the +ClusterDefinition are not validated when using this approach. +
  • +
  • +If both `cluster` and `serviceDescriptor` are present, `cluster` will take precedence. +
  • +
+ +

+Deprecated since v0.9 since `clusterDefinition.spec.connectionCredential` is deprecated, +use `clusterServiceSelector` instead. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`clusterServiceSelector`
+ + +ServiceRefClusterSelector + + + +
+ +(Optional) + +

+References a service provided by another KubeBlocks Cluster. +It specifies the ClusterService and the account credentials needed for access. +

+ +
+ +`serviceDescriptor`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceDescriptor object that describes a service provided by external sources. +

+ +

+When referencing a service provided by external sources, a ServiceDescriptor object is required to establish +the service binding. +The `serviceDescriptor.spec.serviceKind` and `serviceDescriptor.spec.serviceVersion` should match the serviceKind +and serviceVersion declared in the definition. +

+ +

+If both `cluster` and `serviceDescriptor` are specified, the `cluster` takes precedence. +

+ +
+

+ServiceRefClusterSelector + +

+ +

+ +(Appears on:ServiceRef) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cluster`
+ +string + + +
+ + +

+The name of the Cluster being referenced. +

+ +
+ +`service`
+ + +ServiceRefServiceSelector + + + +
+ +(Optional) + +

+Identifies a ClusterService from the list of Services defined in `cluster.spec.services` of the referenced Cluster. +

+ +
+ +`credential`
+ + +ServiceRefCredentialSelector + + + +
+ +(Optional) + +

+Specifies the SystemAccount to authenticate and establish a connection with the referenced Cluster. +The SystemAccount should be defined in `componentDefinition.spec.systemAccounts` +of the Component providing the service in the referenced Cluster. +

+ +
+

+ServiceRefCredentialSelector + +

+ +

+ +(Appears on:ServiceRefClusterSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`component`
+ +string + + +
+ + +

+The name of the Component where the credential resides in. +

+ +
+ +`name`
+ +string + + +
+ + +

+The name of the credential (SystemAccount) to reference. +

+ +
+

+ServiceRefDeclaration + +

+ +

+ +(Appears on:ClusterComponentDefinition, ComponentDefinitionSpec) + +

+
+ +

+ServiceRefDeclaration represents a reference to a service that can be either provided by a KubeBlocks Cluster +or an external service. +It acts as a placeholder for the actual service reference, which is determined later when a Cluster is created. +

+ +

+The purpose of ServiceRefDeclaration is to declare a service dependency without specifying the concrete details +of the service. +It allows for flexibility and abstraction in defining service references within a Component. +By using ServiceRefDeclaration, you can define service dependencies in a declarative manner, enabling loose coupling +and easier management of service references across different components and clusters. +

+ +

+Upon Cluster creation, the ServiceRefDeclaration is bound to an actual service through the ServiceRef field, +effectively resolving and connecting to the specified service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the ServiceRefDeclaration. +

+ +
+ +`serviceRefDeclarationSpecs`
+ + +[]ServiceRefDeclarationSpec + + + +
+ + +

+Defines a list of constraints and requirements for services that can be bound to this ServiceRefDeclaration +upon Cluster creation. +Each ServiceRefDeclarationSpec defines a ServiceKind and ServiceVersion, +outlining the acceptable service types and versions that are compatible. +

+ +

+This flexibility allows a ServiceRefDeclaration to be fulfilled by any one of the provided specs. +For example, if it requires an OLTP database and specs for both MySQL and PostgreSQL are listed, +either MySQL or PostgreSQL services can be used when binding. +

+ +
+ +`optional`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the service reference can be optional. +

+ +

+For an optional service-ref, the component can still be created even if the service-ref is not provided. +

+ +
+

+ServiceRefDeclarationSpec + +

+ +

+ +(Appears on:ServiceRefDeclaration) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceKind`
+ +string + + +
+ + +

+Specifies the type or nature of the service. This should be a well-known application cluster type, such as +{mysql, redis, mongodb}. +The field is case-insensitive and supports abbreviations for some well-known databases. +For instance, both `zk` and `zookeeper` are considered as a ZooKeeper cluster, while `pg`, `postgres`, `postgresql` +are all recognized as a PostgreSQL cluster. +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+Defines the service version of the service reference. This is a regular expression that matches a version number pattern. +For instance, `^8.0.8$`, `8.0.\d{1,2}$`, `^[v\-]*?(\d{1,2}\.){0,3}\d{1,2}$` are all valid patterns. +

+ +
+

+ServiceRefServiceSelector + +

+ +

+ +(Appears on:ServiceRefClusterSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`component`
+ +string + + +
+ +(Optional) + +

+The name of the Component where the Service resides in. +

+ +

+It is required when referencing a Component’s Service. +

+ +
+ +`service`
+ +string + + +
+ + +

+The name of the Service to be referenced. +

+ +

+Leave it empty to reference the default Service. Set it to “headless” to reference the default headless Service. +

+ +

+If the referenced Service is of pod-service type (a Service per Pod), there will be multiple Service objects matched, +and the resolved value will be presented in the following format: service1.name,service2.name… +

+ +
+ +`port`
+ +string + + +
+ +(Optional) + +

+The port name of the Service to be referenced. +

+ +

+If a non-zero node-port exists for the matched Service port, the node-port will be selected first. +

+ +

+If the referenced Service is of pod-service type (a Service per Pod), there will be multiple Service objects matched, +and the resolved value will be presented in the following format: service1.name:port1,service2.name:port2… +

+ +
+

+ServiceRefVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ServiceRefVarSelector selects a var from a ServiceRefDeclaration. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The ServiceRefDeclaration to select from. +

+ +
+ +`ServiceRefVars`
+ + +ServiceRefVars + + + +
+ + +

+ +(Members of `ServiceRefVars` are embedded into this type.) + +

+ +
+

+ServiceRefVars + +

+ +

+ +(Appears on:ServiceRefVarSelector) + +

+
+ +

+ServiceRefVars defines the vars that can be referenced from a ServiceRef. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`endpoint`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`host`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`port`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`CredentialVars`
+ + +CredentialVars + + + +
+ + +

+ +(Members of `CredentialVars` are embedded into this type.) + +

+ +
+

+ServiceSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+ServiceSpec is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ports`
+ + +[]ServicePort + + + +
+ +(Optional) + +

+The list of ports that are exposed by this service. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+

+ServiceVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ServiceVarSelector selects a var from a Service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Service to select from. +It can be referenced from the default headless service by setting the name to “headless”. +

+ +
+ +`ServiceVars`
+ + +ServiceVars + + + +
+ + +

+ +(Members of `ServiceVars` are embedded into this type.) + +

+ +
+

+ServiceVars + +

+ +

+ +(Appears on:ServiceVarSelector) + +

+
+ +

+ServiceVars defines the vars that can be referenced from a Service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`host`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`loadBalancer`
+ + +VarOption + + + +
+ +(Optional) + +

+LoadBalancer represents the LoadBalancer ingress point of the service. +

+ +

+If multiple ingress points are available, the first one will be used automatically, choosing between IP and Hostname. +

+ +
+ +`port`
+ + +NamedVar + + + +
+ +(Optional) + +

+Port references a port or node-port defined in the service. +

+ +

+If the referenced service is a pod-service, there will be multiple service objects matched, +and the value will be presented in the following format: service1.name:port1,service2.name:port2… +

+ +
+

+ShardingSpec + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+ShardingSpec defines how KubeBlocks manages dynamically provisioned shards. +A typical design pattern for distributed databases is to distribute data across multiple shards, +with each shard consisting of multiple replicas. +Therefore, KubeBlocks supports representing a shard with a Component and dynamically instantiating Components +using a template when shards are added. +When shards are removed, the corresponding Components are also deleted. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Represents the common parent part of all shard names. +This identifier is included as part of the Service DNS name and must comply with IANA service naming rules. +It is used to generate the names of underlying Components following the pattern `$(shardingSpec.name)-$(ShardID)`. +ShardID is a random string that is appended to the Name to generate unique identifiers for each shard. +For example, if the sharding specification name is “my-shard” and the ShardID is “abc”, the resulting Component name +would be “my-shard-abc”. +

+ +

+Note that the name defined in Component template(`shardingSpec.template.name`) will be disregarded +when generating the Component names of the shards. The `shardingSpec.name` field takes precedence. +

+ +
+ +`template`
+ + +ClusterComponentSpec + + + +
+ + +

+The template for generating Components for shards, where each shard consists of one Component. +This field is of type ClusterComponentSpec, which encapsulates all the required details and +definitions for creating and managing the Components. +KubeBlocks uses this template to generate a set of identical Components or shards. +All the generated Components will have the same specifications and definitions as specified in the `template` field. +

+ +

+This allows for the creation of multiple Components with consistent configurations, +enabling sharding and distribution of workloads across Components. +

+ +
+ +`shards`
+ +int32 + + +
+ + +

+Specifies the desired number of shards. +Users can declare the desired number of shards through this field. +KubeBlocks dynamically creates and deletes Components based on the difference +between the desired and actual number of shards. +KubeBlocks provides lifecycle management for sharding, including: +

+
    +
  • +Executing the postProvision Action defined in the ComponentDefinition when the number of shards increases. +This allows for custom actions to be performed after a new shard is provisioned. +
  • +
  • +Executing the preTerminate Action defined in the ComponentDefinition when the number of shards decreases. +This enables custom cleanup or data migration tasks to be executed before a shard is terminated. +Resources and data associated with the corresponding Component will also be deleted. +
  • +
+ +
+

+StatefulSetSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition, ConsensusSetSpec, ReplicationSetSpec) + +

+
+ +

+StatefulSetSpec is deprecated since v0.7. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the strategy for updating Pods. +For workloadType=`Consensus`, the update strategy can be one of the following: +

+
    +
  • +`Serial`: Updates Members sequentially to minimize component downtime. +
  • +
  • +`BestEffortParallel`: Updates Members in parallel to minimize component write downtime. Majority remains online +at all times. +
  • +
  • +`Parallel`: Forces parallel updates. +
  • +
+ +
+ +`llPodManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+Controls the creation of pods during initial scale up, replacement of pods on nodes, and scaling down. +

+
    +
  • +`OrderedReady`: Creates pods in increasing order (pod-0, then pod-1, etc). The controller waits until each pod +is ready before continuing. Pods are removed in reverse order when scaling down. +
  • +
  • +`Parallel`: Creates pods in parallel to match the desired scale without waiting. All pods are deleted at once +when scaling down. +
  • +
+ +
+ +`llUpdateStrategy`
+ + +Kubernetes apps/v1.StatefulSetUpdateStrategy + + + +
+ +(Optional) + +

+Specifies the low-level StatefulSetUpdateStrategy to be used when updating Pods in the StatefulSet upon a +revision to the Template. +`UpdateStrategy` will be ignored if this is provided. +

+ +
+

+StatefulSetWorkload + +

+
+ +

+StatefulSetWorkload interface +

+
+

+StatelessSetSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+StatelessSetSpec is deprecated since v0.7. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`updateStrategy`
+ + +Kubernetes apps/v1.DeploymentStrategy + + + +
+ +(Optional) + +

+Specifies the deployment strategy that will be used to replace existing pods with new ones. +

+ +
+

+SwitchPolicyType +(`string` alias) +

+ +

+ +(Appears on:ClusterSwitchPolicy) + +

+
+ +

+SwitchPolicyType defines the types of switch policies that can be applied to a cluster. +

+ +

+Currently, only the Noop policy is supported. Support for MaximumAvailability and MaximumDataProtection policies is +planned for future releases. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"MaximumAvailability" +

+
+ +

+MaximumAvailability represents a switch policy that aims for maximum availability. This policy will switch if the +primary is active and the synchronization delay is 0 according to the user-defined lagProbe data delay detection +logic. If the primary is down, it will switch immediately. +This policy is intended for future support. +

+ +
+ +

+"MaximumDataProtection" +

+
+ +

+MaximumDataProtection represents a switch policy focused on maximum data protection. This policy will only switch +if the primary is active and the synchronization delay is 0, based on the user-defined lagProbe data lag detection +logic. If the primary is down, it will switch only if it can be confirmed that the primary and secondary data are +consistent. Otherwise, it will not switch. +This policy is planned for future implementation. +

+ +
+ +

+"Noop" +

+
+ +

+Noop indicates that KubeBlocks will not perform any high-availability switching for the components. Users are +required to implement their own HA solution or integrate an existing open-source HA solution. +

+ +
+

+SwitchoverAction + +

+ +

+ +(Appears on:SwitchoverSpec) + +

+
+ +

+SwitchoverAction is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cmdExecutorConfig`
+ + +CmdExecutorConfig + + + +
+ + +

+Specifies the switchover command. +

+ +
+ +`scriptSpecSelectors`
+ + +[]ScriptSpecSelector + + + +
+ +(Optional) + +

+Used to select the script that need to be referenced. +When defined, the scripts defined in scriptSpecs can be referenced within the SwitchoverAction.CmdExecutorConfig. +

+ +
+

+SwitchoverSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+SwitchoverSpec is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`withCandidate`
+ + +SwitchoverAction + + + +
+ +(Optional) + +

+Represents the action of switching over to a specified candidate primary or leader instance. +

+ +
+ +`withoutCandidate`
+ + +SwitchoverAction + + + +
+ +(Optional) + +

+Represents the action of switching over without specifying a candidate primary or leader instance. +

+ +
+

+SystemAccount + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the unique identifier for the account. This name is used by other entities to reference the account. +

+ +

+This field is immutable once set. +

+ +
+ +`initAccount`
+ +bool + + +
+ +(Optional) + +

+Indicates if this account is a system initialization account (e.g., MySQL root). +

+ +

+This field is immutable once set. +

+ +
+ +`statement`
+ +string + + +
+ +(Optional) + +

+Defines the statement used to create the account with the necessary privileges. +

+ +

+This field is immutable once set. +

+ +
+ +`passwordGenerationPolicy`
+ + +PasswordConfig + + + +
+ +(Optional) + +

+Specifies the policy for generating the account’s password. +

+ +

+This field is immutable once set. +

+ +
+ +`secretRef`
+ + +ProvisionSecretRef + + + +
+ +(Optional) + +

+Refers to the secret from which data will be copied to create the new account. +

+ +

+This field is immutable once set. +

+ +
+

+SystemAccountConfig + +

+ +

+ +(Appears on:SystemAccountSpec) + +

+
+ +

+SystemAccountConfig specifies how to create and delete system accounts. +

+ +

+Deprecated since v0.9. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ + +AccountName + + + +
+ + +

+The unique identifier of a system account. +

+ +
+ +`provisionPolicy`
+ + +ProvisionPolicy + + + +
+ + +

+Outlines the strategy for creating the account. +

+ +
+

+SystemAccountSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+SystemAccountSpec specifies information to create system accounts. +

+ +

+Deprecated since v0.8, be replaced by `componentDefinition.spec.systemAccounts` and +`componentDefinition.spec.lifecycleActions.accountProvision`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cmdExecutorConfig`
+ + +CmdExecutorConfig + + + +
+ + +

+Configures how to obtain the client SDK and execute statements. +

+ +
+ +`passwordConfig`
+ + +PasswordConfig + + + +
+ + +

+Defines the pattern used to generate passwords for system accounts. +

+ +
+ +`accounts`
+ + +[]SystemAccountConfig + + + +
+ + +

+Defines the configuration settings for system accounts. +

+ +
+

+TLSConfig + +

+ +

+ +(Appears on:ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enable`
+ +bool + + +
+ +(Optional) + +

+A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) +for secure communication. +When set to true, the Component will be configured to use TLS encryption for its network connections. +This ensures that the data transmitted between the Component and its clients or other Components is encrypted +and protected from unauthorized access. +If TLS is enabled, the Component may require additional configuration, +such as specifying TLS certificates and keys, to properly set up the secure communication channel. +

+ +
+ +`issuer`
+ + +Issuer + + + +
+ +(Optional) + +

+Specifies the configuration for the TLS certificates issuer. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +Required when TLS is enabled. +

+ +
+

+TLSSecretRef + +

+ +

+ +(Appears on:Issuer) + +

+
+ +

+TLSSecretRef defines the Secret that contains the TLS certs. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name of the Secret that contains user-provided certificates. +

+ +
+ +`ca`
+ +string + + +
+ + +

+Key of CA cert in Secret +

+ +
+ +`cert`
+ +string + + +
+ + +

+Key of Cert in Secret +

+ +
+ +`key`
+ +string + + +
+ + +

+Key of TLS private key in Secret +

+ +
+

+TargetPodSelector +(`string` alias) +

+ +

+ +(Appears on:Action) + +

+
+ +

+TargetPodSelector defines how to select pod(s) to execute an Action. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"All" +

+
+ +
+ +

+"Any" +

+
+ +
+ +

+"Ordinal" +

+
+ +
+ +

+"Role" +

+
+ +
+

+TenancyType +(`string` alias) +

+ +

+ +(Appears on:Affinity, ClusterSpec) + +

+
+ +

+TenancyType defines the type of tenancy for cluster tenant resources. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"DedicatedNode" +

+
+ +

+DedicatedNode means each pod runs on their own dedicated node. +

+ +
+ +

+"SharedNode" +

+
+ +

+SharedNode means multiple pods may share the same node. +

+ +
+

+TerminationPolicyType +(`string` alias) +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+TerminationPolicyType defines termination policy types. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Delete" +

+
+ +

+Delete is based on Halt and deletes PVCs. +

+ +
+ +

+"DoNotTerminate" +

+
+ +

+DoNotTerminate will block delete operation. +

+ +
+ +

+"Halt" +

+
+ +

+Halt will delete workload resources such as statefulset, deployment workloads but keep PVCs. +

+ +
+ +

+"WipeOut" +

+
+ +

+WipeOut is based on Delete and wipe out all volume snapshots and snapshot data from backup storage location. +

+ +
+

+UpdateStrategy +(`string` alias) +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentDefinitionSpec, StatefulSetSpec) + +

+
+ +

+UpdateStrategy defines the update strategy for cluster components. This strategy determines how updates are applied +across the cluster. +The available strategies are `Serial`, `BestEffortParallel`, and `Parallel`. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"BestEffortParallel" +

+
+ +

+BestEffortParallelStrategy indicates that the replicas are updated in parallel, with the operator making +a best-effort attempt to update as many replicas as possible concurrently +while maintaining the component’s availability. +Unlike the `Parallel` strategy, the `BestEffortParallel` strategy aims to ensure that a minimum number +of replicas remain available during the update process to maintain the component’s quorum and functionality. +

+ +

+For example, consider a component with 5 replicas. To maintain the component’s availability and quorum, +the operator may allow a maximum of 2 replicas to be simultaneously updated. This ensures that at least +3 replicas (a quorum) remain available and functional during the update process. +

+ +

+The `BestEffortParallel` strategy strikes a balance between update speed and component availability. +

+ +
+ +

+"Parallel" +

+
+ +

+ParallelStrategy indicates that updates are applied simultaneously to all Pods of a Component. +The replicas are updated in parallel, with the operator updating all replicas concurrently. +This strategy provides the fastest update time but may lead to a period of reduced availability or +capacity during the update process. +

+ +
+ +

+"Serial" +

+
+ +

+SerialStrategy indicates that updates are applied one at a time in a sequential manner. +The operator waits for each replica to be updated and ready before proceeding to the next one. +This ensures that only one replica is unavailable at a time during the update process. +

+ +
+

+UpgradePolicy +(`string` alias) +

+
+ +

+UpgradePolicy defines the policy of reconfiguring. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"autoReload" +

+
+ +
+ +

+"dynamicReloadBeginRestart" +

+
+ +
+ +

+"none" +

+
+ +
+ +

+"simple" +

+
+ +
+ +

+"parallel" +

+
+ +
+ +

+"rolling" +

+
+ +
+ +

+"operatorSyncUpdate" +

+
+ +
+

+UserResourceRefs + +

+ +

+ +(Appears on:ClusterComponentSpec) + +

+
+ +

+UserResourceRefs defines references to user-defined Secrets and ConfigMaps. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`secretRefs`
+ + +[]SecretRef + + + +
+ +(Optional) + +

+SecretRefs defines the user-defined Secrets. +

+ +
+ +`configMapRefs`
+ + +[]ConfigMapRef + + + +
+ +(Optional) + +

+ConfigMapRefs defines the user-defined ConfigMaps. +

+ +
+

+VarOption +(`string` alias) +

+ +

+ +(Appears on:ComponentVars, CredentialVars, NamedVar, ServiceRefVars, ServiceVars) + +

+
+ +

+VarOption defines whether a variable is required or optional. +

+
+

+VarSource + +

+ +

+ +(Appears on:EnvVar) + +

+
+ +

+VarSource represents a source for the value of an EnvVar. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`configMapKeyRef`
+ + +Kubernetes core/v1.ConfigMapKeySelector + + + +
+ +(Optional) + +

+Selects a key of a ConfigMap. +

+ +
+ +`secretKeyRef`
+ + +Kubernetes core/v1.SecretKeySelector + + + +
+ +(Optional) + +

+Selects a key of a Secret. +

+ +
+ +`hostNetworkVarRef`
+ + +HostNetworkVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of host-network resources. +

+ +
+ +`serviceVarRef`
+ + +ServiceVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Service. +

+ +
+ +`credentialVarRef`
+ + +CredentialVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Credential (SystemAccount). +

+ +
+ +`serviceRefVarRef`
+ + +ServiceRefVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a ServiceRef. +

+ +
+ +`componentVarRef`
+ + +ComponentVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Component. +

+ +
+

+VolumeProtectionSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+VolumeProtectionSpec is deprecated since v0.9, replaced with ComponentVolume.HighWatermark. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`highWatermark`
+ +int + + +
+ +(Optional) + +

+The high watermark threshold for volume space usage. +If there is any specified volumes who’s space usage is over the threshold, the pre-defined “LOCK” action +will be triggered to degrade the service to protect volume from space exhaustion, such as to set the instance +as read-only. And after that, if all volumes’ space usage drops under the threshold later, the pre-defined +“UNLOCK” action will be performed to recover the service normally. +

+ +
+ +`volumes`
+ + +[]ProtectedVolume + + + +
+ +(Optional) + +

+The Volumes to be protected. +

+ +
+

+VolumeType +(`string` alias) +

+ +

+ +(Appears on:VolumeTypeSpec) + +

+
+ +

+VolumeType defines the type of volume, specifically distinguishing between volumes used for backup data and those used for logs. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"data" +

+
+ +

+VolumeTypeData indicates a volume designated for storing backup data. This type of volume is optimized for the +storage and retrieval of data backups, ensuring data persistence and reliability. +

+ +
+ +

+"log" +

+
+ +

+VolumeTypeLog indicates a volume designated for storing logs. This type of volume is optimized for log data, +facilitating efficient log storage, retrieval, and management. +

+ +
+

+VolumeTypeSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+VolumeTypeSpec is deprecated since v0.9, replaced with ComponentVolume. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Corresponds to the name of the VolumeMounts field in PodSpec.Container. +

+ +
+ +`type`
+ + +VolumeType + + + +
+ +(Optional) + +

+Type of data the volume will persist. +

+ +
+

+WorkloadType +(`string` alias) +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+WorkloadType defines the type of workload for the components of the ClusterDefinition. +It can be one of the following: `Stateless`, `Stateful`, `Consensus`, or `Replication`. +

+ +

+Deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Consensus" +

+
+ +

+Consensus represents a workload type involving distributed consensus algorithms for coordinated decision-making. +

+ +
+ +

+"Replication" +

+
+ +

+Replication represents a workload type that involves replication, typically used for achieving high availability +and fault tolerance. +

+ +
+ +

+"Stateful" +

+
+ +

+Stateful represents a workload type where components maintain state, and each instance has a unique identity. +

+ +
+ +

+"Stateless" +

+
+ +

+Stateless represents a workload type where components do not maintain state, and instances are interchangeable. +

+ +
+
+

apps.kubeblocks.io/v1beta1

+
+
+Resource Types: + +

+ConfigConstraint + +

+
+ +

+ConfigConstraint manages the parameters across multiple configuration files contained in a single configure template. +These configuration files should have the same format (e.g. ini, xml, properties, json). +

+ +

+It provides the following functionalities: +

+
    +
  1. +Parameter Value Validation: Validates and ensures compliance of parameter values with defined constraints. +
  2. +
  3. +Dynamic Reload on Modification: Monitors parameter changes and triggers dynamic reloads to apply updates. +
  4. +
  5. +Parameter Rendering in Templates: Injects parameters into templates to generate up-to-date configuration files. +
  6. +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1beta1` + +
+ +`kind`
+string + +
+`ConfigConstraint` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ConfigConstraintSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`reloadAction`
+ + +ReloadAction + + + +
+ +(Optional) + +

+Specifies the dynamic reload (dynamic reconfiguration) actions supported by the engine. +When set, the controller executes the scripts defined in these actions to handle dynamic parameter updates. +

+ +

+Dynamic reloading is triggered only if both of the following conditions are met: +

+
    +
  1. +The modified parameters are listed in the `dynamicParameters` field. +If `dynamicParameterSelectedPolicy` is set to “all”, modifications to `staticParameters` +can also trigger a reload. +
  2. +
  3. +`reloadAction` is set. +
  4. +
+ +

+If `reloadAction` is not set or the modified parameters are not listed in `dynamicParameters`, +dynamic reloading will not be triggered. +

+ +

+Example: +

+
+
+dynamicReloadAction:
+ tplScriptTrigger:
+   namespace: kb-system
+   scriptConfigMapRef: mysql-reload-script
+   sync: true
+
+
+ +
+ +`mergeReloadAndRestart`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to consolidate dynamic reload and restart actions into a single restart. +

+
    +
  • +If true, updates requiring both actions will result in only a restart, merging the actions. +
  • +
  • +If false, updates will trigger both actions executed sequentially: first dynamic reload, then restart. +
  • +
+ +

+This flag allows for more efficient handling of configuration changes by potentially eliminating +an unnecessary reload step. +

+ +
+ +`reloadStaticParamsBeforeRestart`
+ +bool + + +
+ +(Optional) + +

+Configures whether the dynamic reload specified in `reloadAction` applies only to dynamic parameters or +to all parameters (including static parameters). +

+
    +
  • +false (default): Only modifications to the dynamic parameters listed in `dynamicParameters` +will trigger a dynamic reload. +
  • +
  • +true: Modifications to both dynamic parameters listed in `dynamicParameters` and static parameters +listed in `staticParameters` will trigger a dynamic reload. +The “all” option is for certain engines that require static parameters to be set +via SQL statements before they can take effect on restart. +
  • +
+ +
+ +`downwardAPIChangeTriggeredActions`
+ + +[]DownwardAPIChangeTriggeredAction + + + +
+ +(Optional) + +

+Specifies a list of actions to execute specified commands based on Pod labels. +

+ +

+It utilizes the K8s Downward API to mount label information as a volume into the pod. +The ‘config-manager’ sidecar container watches for changes in the role label and dynamically invoke +registered commands (usually execute some SQL statements) when a change is detected. +

+ +

+It is designed for scenarios where: +

+
    +
  • +Replicas with different roles have different configurations, such as Redis primary & secondary replicas. +
  • +
  • +After a role switch (e.g., from secondary to primary), some changes in configuration are needed +to reflect the new role. +
  • +
+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Defines a list of parameters including their names, default values, descriptions, +types, and constraints (permissible values or the range of valid values). +

+ +
+ +`staticParameters`
+ +[]string + + +
+ +(Optional) + +

+List static parameters. +Modifications to any of these parameters require a restart of the process to take effect. +

+ +
+ +`dynamicParameters`
+ +[]string + + +
+ +(Optional) + +

+List dynamic parameters. +Modifications to these parameters trigger a configuration reload without requiring a process restart. +

+ +
+ +`immutableParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the parameters that cannot be modified once set. +Attempting to change any of these parameters will be ignored. +

+ +
+ +`fileFormatConfig`
+ + +FileFormatConfig + + + +
+ + +

+Specifies the format of the configuration file and any associated parameters that are specific to the chosen format. +Supported formats include `ini`, `xml`, `yaml`, `json`, `hcl`, `dotenv`, `properties`, and `toml`. +

+ +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +

+Example: +

+
+
+fileFormatConfig:
+ format: ini
+ iniConfig:
+   sectionName: mysqld
+
+
+ +
+ +
+ +`status`
+ + +ConfigConstraintStatus + + + +
+ + +
+

+AutoTrigger + +

+ +

+ +(Appears on:ReloadOptions, ReloadAction) + +

+
+ +

+AutoTrigger automatically performs the reload when specified conditions are met. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`processName`
+ +string + + +
+ +(Optional) + +

+The name of the process. +

+ +
+

+CfgFileFormat +(`string` alias) +

+ +

+ +(Appears on:FileFormatConfig) + +

+
+ +

+CfgFileFormat defines formatter of configuration files. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"dotenv" +

+
+ +
+ +

+"hcl" +

+
+ +
+ +

+"ini" +

+
+ +
+ +

+"json" +

+
+ +
+ +

+"properties" +

+
+ +
+ +

+"props-plus" +

+
+ +
+ +

+"redis" +

+
+ +
+ +

+"toml" +

+
+ +
+ +

+"xml" +

+
+ +
+ +

+"yaml" +

+
+ +
+

+ConfigConstraintPhase +(`string` alias) +

+ +

+ +(Appears on:ConfigConstraintStatus, ConfigConstraintStatus) + +

+
+ +

+ConfigConstraintPhase defines the ConfigConstraint CR .status.phase +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +
+ +

+"Deleting" +

+
+ +
+ +

+"Unavailable" +

+
+ +
+

+ConfigConstraintSpec + +

+ +

+ +(Appears on:ConfigConstraint) + +

+
+ +

+ConfigConstraintSpec defines the desired state of ConfigConstraint +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`reloadAction`
+ + +ReloadAction + + + +
+ +(Optional) + +

+Specifies the dynamic reload (dynamic reconfiguration) actions supported by the engine. +When set, the controller executes the scripts defined in these actions to handle dynamic parameter updates. +

+ +

+Dynamic reloading is triggered only if both of the following conditions are met: +

+
    +
  1. +The modified parameters are listed in the `dynamicParameters` field. +If `dynamicParameterSelectedPolicy` is set to “all”, modifications to `staticParameters` +can also trigger a reload. +
  2. +
  3. +`reloadAction` is set. +
  4. +
+ +

+If `reloadAction` is not set or the modified parameters are not listed in `dynamicParameters`, +dynamic reloading will not be triggered. +

+ +

+Example: +

+
+
+dynamicReloadAction:
+ tplScriptTrigger:
+   namespace: kb-system
+   scriptConfigMapRef: mysql-reload-script
+   sync: true
+
+
+ +
+ +`mergeReloadAndRestart`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to consolidate dynamic reload and restart actions into a single restart. +

+
    +
  • +If true, updates requiring both actions will result in only a restart, merging the actions. +
  • +
  • +If false, updates will trigger both actions executed sequentially: first dynamic reload, then restart. +
  • +
+ +

+This flag allows for more efficient handling of configuration changes by potentially eliminating +an unnecessary reload step. +

+ +
+ +`reloadStaticParamsBeforeRestart`
+ +bool + + +
+ +(Optional) + +

+Configures whether the dynamic reload specified in `reloadAction` applies only to dynamic parameters or +to all parameters (including static parameters). +

+
    +
  • +false (default): Only modifications to the dynamic parameters listed in `dynamicParameters` +will trigger a dynamic reload. +
  • +
  • +true: Modifications to both dynamic parameters listed in `dynamicParameters` and static parameters +listed in `staticParameters` will trigger a dynamic reload. +The “all” option is for certain engines that require static parameters to be set +via SQL statements before they can take effect on restart. +
  • +
+ +
+ +`downwardAPIChangeTriggeredActions`
+ + +[]DownwardAPIChangeTriggeredAction + + + +
+ +(Optional) + +

+Specifies a list of actions to execute specified commands based on Pod labels. +

+ +

+It utilizes the K8s Downward API to mount label information as a volume into the pod. +The ‘config-manager’ sidecar container watches for changes in the role label and dynamically invoke +registered commands (usually execute some SQL statements) when a change is detected. +

+ +

+It is designed for scenarios where: +

+
    +
  • +Replicas with different roles have different configurations, such as Redis primary & secondary replicas. +
  • +
  • +After a role switch (e.g., from secondary to primary), some changes in configuration are needed +to reflect the new role. +
  • +
+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Defines a list of parameters including their names, default values, descriptions, +types, and constraints (permissible values or the range of valid values). +

+ +
+ +`staticParameters`
+ +[]string + + +
+ +(Optional) + +

+List static parameters. +Modifications to any of these parameters require a restart of the process to take effect. +

+ +
+ +`dynamicParameters`
+ +[]string + + +
+ +(Optional) + +

+List dynamic parameters. +Modifications to these parameters trigger a configuration reload without requiring a process restart. +

+ +
+ +`immutableParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the parameters that cannot be modified once set. +Attempting to change any of these parameters will be ignored. +

+ +
+ +`fileFormatConfig`
+ + +FileFormatConfig + + + +
+ + +

+Specifies the format of the configuration file and any associated parameters that are specific to the chosen format. +Supported formats include `ini`, `xml`, `yaml`, `json`, `hcl`, `dotenv`, `properties`, and `toml`. +

+ +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +

+Example: +

+
+
+fileFormatConfig:
+ format: ini
+ iniConfig:
+   sectionName: mysqld
+
+
+ +
+

+ConfigConstraintStatus + +

+ +

+ +(Appears on:ConfigConstraint) + +

+
+ +

+ConfigConstraintStatus represents the observed state of a ConfigConstraint. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +ConfigConstraintPhase + + + +
+ +(Optional) + +

+Specifies the status of the configuration template. +When set to CCAvailablePhase, the ConfigConstraint can be referenced by ClusterDefinition. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides descriptions for abnormal states. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Refers to the most recent generation observed for this ConfigConstraint. This value is updated by the API Server. +

+ +
+

+DownwardAPIChangeTriggeredAction + +

+ +

+ +(Appears on:ConfigConstraintSpec, ConfigConstraintSpec) + +

+
+ +

+DownwardAPIChangeTriggeredAction defines an action that triggers specific commands in response to changes in Pod labels. +For example, a command might be executed when the ‘role’ label of the Pod is updated. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the field. It must be a string of maximum length 63. +The name should match the regex pattern `^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$`. +

+ +
+ +`mountPoint`
+ +string + + +
+ + +

+Specifies the mount point of the Downward API volume. +

+ +
+ +`items`
+ + +[]Kubernetes core/v1.DownwardAPIVolumeFile + + + +
+ + +

+Represents a list of files under the Downward API volume. +

+ +
+ +`command`
+ +[]string + + +
+ +(Optional) + +

+Specifies the command to be triggered when changes are detected in Downward API volume files. +It relies on the inotify mechanism in the config-manager sidecar to monitor file changes. +

+ +
+ +`scriptConfig`
+ + +ScriptConfig + + + +
+ +(Optional) + +

+ScriptConfig object specifies a ConfigMap that contains script files that should be mounted inside the pod. +The scripts are mounted as volumes and can be referenced and executed by the DownwardAction to perform specific tasks or configurations. +

+ +
+

+DynamicParameterSelectedPolicy +(`string` alias) +

+
+ +

+DynamicParameterSelectedPolicy determines how to select the parameters of dynamic reload actions +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"all" +

+
+ +
+ +

+"dynamic" +

+
+ +
+

+DynamicReloadType +(`string` alias) +

+
+ +

+DynamicReloadType defines reload method. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"auto" +

+
+ +
+ +

+"http" +

+
+ +
+ +

+"sql" +

+
+ +
+ +

+"exec" +

+
+ +
+ +

+"tpl" +

+
+ +
+ +

+"signal" +

+
+ +
+

+FileFormatConfig + +

+ +

+ +(Appears on:ConfigConstraintSpec, ConfigConstraintSpec) + +

+
+ +

+FileFormatConfig specifies the format of the configuration file and any associated parameters +that are specific to the chosen format. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`FormatterAction`
+ + +FormatterAction + + + +
+ + +

+ +(Members of `FormatterAction` are embedded into this type.) + +

+(Optional) + +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +
+ +`format`
+ + +CfgFileFormat + + + +
+ + +

+The config file format. Valid values are `ini`, `xml`, `yaml`, `json`, +`hcl`, `dotenv`, `properties` and `toml`. Each format has its own characteristics and use cases. +

+ + +
+

+FormatterAction + +

+ +

+ +(Appears on:FileFormatConfig) + +

+
+ +

+FormatterAction configures format-specific options for different configuration file formats. +Note: Only one of its members should be specified at any given time. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`iniConfig`
+ + +IniConfig + + + +
+ +(Optional) + +

+Holds options specific to the ‘ini’ file format. +

+ +
+

+IniConfig + +

+ +

+ +(Appears on:FormatterAction) + +

+
+ +

+IniConfig holds options specific to the ‘ini’ file format. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`sectionName`
+ +string + + +
+ +(Optional) + +

+A string that describes the name of the ini section. +

+ +
+

+ParametersSchema + +

+ +

+ +(Appears on:ConfigConstraintSpec) + +

+
+ +

+ParametersSchema Defines a list of configuration items with their names, default values, descriptions, +types, and constraints. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`topLevelKey`
+ +string + + +
+ +(Optional) + +

+Specifies the top-level key in the ‘configSchema.cue’ that organizes the validation rules for parameters. +This key must exist within the CUE script defined in ‘configSchema.cue’. +

+ +
+ +`cue`
+ +string + + +
+ +(Optional) + +

+Hold a string that contains a script written in CUE language that defines a list of configuration items. +Each item is detailed with its name, default value, description, type (e.g. string, integer, float), +and constraints (permissible values or the valid range of values). +

+ +

+CUE (Configure, Unify, Execute) is a declarative language designed for defining and validating +complex data configurations. +It is particularly useful in environments like K8s where complex configurations and validation rules are common. +

+ +

+This script functions as a validator for user-provided configurations, ensuring compliance with +the established specifications and constraints. +

+ +
+ +`schemaInJSON`
+ + +Kubernetes api extensions v1.JSONSchemaProps + + + +
+ + +

+Generated from the ‘cue’ field and transformed into a JSON format. +

+ +
+

+ReloadAction + +

+ +

+ +(Appears on:ConfigConstraintSpec) + +

+
+ +

+ReloadAction defines the mechanisms available for dynamically reloading a process within K8s without requiring a restart. +

+ +

+Only one of the mechanisms can be specified at a time. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`unixSignalTrigger`
+ + +UnixSignalTrigger + + + +
+ +(Optional) + +

+Used to trigger a reload by sending a specific Unix signal to the process. +

+ +
+ +`shellTrigger`
+ + +ShellTrigger + + + +
+ +(Optional) + +

+Allows the execution of a custom shell script to reload the process. +

+ +
+ +`tplScriptTrigger`
+ + +TPLScriptTrigger + + + +
+ +(Optional) + +

+Enables reloading process using a Go template script. +

+ +
+ +`autoTrigger`
+ + +AutoTrigger + + + +
+ +(Optional) + +

+Automatically perform the reload when specified conditions are met. +

+ +
+ +`targetPodSelector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ +(Optional) + +

+Used to match labels on the pod to determine whether a dynamic reload should be performed. +

+ +

+In some scenarios, only specific pods (e.g., primary replicas) need to undergo a dynamic reload. +The `reloadedPodSelector` allows you to specify label selectors to target the desired pods for the reload process. +

+ +

+If the `reloadedPodSelector` is not specified or is nil, all pods managed by the workload will be considered for the dynamic +reload. +

+ +
+

+ScriptConfig + +

+ +

+ +(Appears on:ConfigConstraintSpec, DownwardAPIChangeTriggeredAction, ShellTrigger, TPLScriptTrigger) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`scriptConfigMapRef`
+ +string + + +
+ + +

+Specifies the reference to the ConfigMap containing the scripts. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace for the ConfigMap. +If not specified, it defaults to the “default” namespace. +

+ +
+

+ShellTrigger + +

+ +

+ +(Appears on:ReloadOptions, ReloadAction) + +

+
+ +

+ShellTrigger allows the execution of a custom shell script to reload the process. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`command`
+ +[]string + + +
+ + +

+Specifies the command to execute in order to reload the process. It should be a valid shell command. +

+ +
+ +`sync`
+ +bool + + +
+ +(Optional) + +

+Determines the synchronization mode of parameter updates with “config-manager”. +

+
    +
  • +‘True’: Executes reload actions synchronously, pausing until completion. +
  • +
  • +‘False’: Executes reload actions asynchronously, without waiting for completion. +
  • +
+ +
+ +`batchReload`
+ +bool + + +
+ +(Optional) + +

+Controls whether parameter updates are processed individually or collectively in a batch: +

+
    +
  • +‘True’: Processes all changes in one batch reload. +
  • +
  • +‘False’: Processes each change individually. +
  • +
+ +

+Defaults to ‘False’ if unspecified. +

+ +
+ +`batchParamsFormatterTemplate`
+ +string + + +
+ +(Optional) + +

+Specifies a Go template string for formatting batch input data. +It’s used when `batchReload` is ‘True’ to format data passed into STDIN of the script. +The template accesses key-value pairs of updated parameters via the ‘$’ variable. +This allows for custom formatting of the input data. +

+ +

+Example template: +

+
+
+batchParamsFormatterTemplate: |-
+{{- range $pKey, $pValue := $ }}
+{{ printf "%s:%s" $pKey $pValue }}
+{{- end }}
+
+
+ +

+This example generates batch input data in a key:value format, sorted by keys. +

+
+
+key1:value1
+key2:value2
+key3:value3
+
+
+ +

+If not specified, the default format is key=value, sorted by keys, for each updated parameter. +

+
+
+key1=value1
+key2=value2
+key3=value3
+
+
+ +
+ +`toolsSetup`
+ + +ToolsSetup + + + +
+ +(Optional) + +

+Specifies the tools container image used by ShellTrigger for dynamic reload. +If the dynamic reload action is triggered by a ShellTrigger, this field is required. +This image must contain all necessary tools for executing the ShellTrigger scripts. +

+ +

+Usually the specified image is referenced by the init container, +which is then responsible for copying the tools from the image to a bin volume. +This ensures that the tools are available to the ‘config-manager’ sidecar. +

+ +
+ +`scriptConfig`
+ + +ScriptConfig + + + +
+ +(Optional) + +

+ScriptConfig object specifies a ConfigMap that contains script files that should be mounted inside the pod. +The scripts are mounted as volumes and can be referenced and executed by the dynamic reload. +

+ +
+

+SignalType +(`string` alias) +

+ +

+ +(Appears on:UnixSignalTrigger) + +

+
+ +

+SignalType defines which signals are valid. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"SIGABRT" +

+
+ +
+ +

+"SIGALRM" +

+
+ +
+ +

+"SIGBUS" +

+
+ +
+ +

+"SIGCHLD" +

+
+ +
+ +

+"SIGCONT" +

+
+ +
+ +

+"SIGFPE" +

+
+ +
+ +

+"SIGHUP" +

+
+ +
+ +

+"SIGILL" +

+
+ +
+ +

+"SIGINT" +

+
+ +
+ +

+"SIGIO" +

+
+ +
+ +

+"SIGKILL" +

+
+ +
+ +

+"SIGPIPE" +

+
+ +
+ +

+"SIGPROF" +

+
+ +
+ +

+"SIGPWR" +

+
+ +
+ +

+"SIGQUIT" +

+
+ +
+ +

+"SIGSEGV" +

+
+ +
+ +

+"SIGSTKFLT" +

+
+ +
+ +

+"SIGSTOP" +

+
+ +
+ +

+"SIGSYS" +

+
+ +
+ +

+"SIGTERM" +

+
+ +
+ +

+"SIGTRAP" +

+
+ +
+ +

+"SIGTSTP" +

+
+ +
+ +

+"SIGTTIN" +

+
+ +
+ +

+"SIGTTOU" +

+
+ +
+ +

+"SIGURG" +

+
+ +
+ +

+"SIGUSR1" +

+
+ +
+ +

+"SIGUSR2" +

+
+ +
+ +

+"SIGVTALRM" +

+
+ +
+ +

+"SIGWINCH" +

+
+ +
+ +

+"SIGXCPU" +

+
+ +
+ +

+"SIGXFSZ" +

+
+ +
+

+TPLScriptTrigger + +

+ +

+ +(Appears on:ReloadOptions, ReloadAction) + +

+
+ +

+TPLScriptTrigger Enables reloading process using a Go template script. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ScriptConfig`
+ + +ScriptConfig + + + +
+ + +

+ +(Members of `ScriptConfig` are embedded into this type.) + +

+ +

+Specifies the ConfigMap that contains the script to be executed for reload. +

+ +
+ +`sync`
+ +bool + + +
+ +(Optional) + +

+Determines whether parameter updates should be synchronized with the “config-manager”. +Specifies the controller’s reload strategy: +

+
    +
  • +If set to ‘True’, the controller executes the reload action in synchronous mode, +pausing execution until the reload completes. +
  • +
  • +If set to ‘False’, the controller executes the reload action in asynchronous mode, +updating the ConfigMap without waiting for the reload process to finish. +
  • +
+ +
+

+ToolConfig + +

+ +

+ +(Appears on:ToolsSetup) + +

+
+ +

+ToolConfig specifies the settings of an init container that prepare tools for dynamic reload. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the init container. +

+ +
+ +`asContainerImage`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the tool image should be used as the container image for a sidecar. +This is useful for large tool images, such as those for C++ tools, which may depend on +numerous libraries (e.g., *.so files). +

+ +

+If enabled, the tool image is deployed as a sidecar container image. +

+ +

+Examples: +

+
+
+ toolsSetup:
+   mountPoint: /kb_tools
+   toolConfigs:
+     - name: kb-tools
+       asContainerImage: true
+       image:  apecloud/oceanbase:4.2.0.0-100010032023083021
+
+
+ +

+generated containers: +

+
+
+initContainers:
+ - name: install-config-manager-tool
+   image: apecloud/kubeblocks-tools:${version}
+   command:
+   - cp
+   - /bin/config_render
+   - /opt/tools
+   volumemounts:
+   - name: kb-tools
+     mountpath: /opt/tools
+containers:
+ - name: config-manager
+   image: apecloud/oceanbase:4.2.0.0-100010032023083021
+   imagePullPolicy: IfNotPresent
+   command:
+   - /opt/tools/reloader
+   - --log-level
+   - info
+   - --operator-update-enable
+   - --tcp
+   - "9901"
+   - --config
+   - /opt/config-manager/config-manager.yaml
+   volumeMounts:
+   - name: kb-tools
+     mountPath: /opt/tools
+
+
+ +
+ +`image`
+ +string + + +
+ +(Optional) + +

+Specifies the tool container image. +

+ +
+ +`command`
+ +[]string + + +
+ +(Optional) + +

+Specifies the command to be executed by the init container. +

+ +
+

+ToolsSetup + +

+ +

+ +(Appears on:ConfigConstraintSpec, ShellTrigger) + +

+
+ +

+ToolsSetup prepares the tools for dynamic reloads used in ShellTrigger from a specified container image. +

+ +

+Example: +

+
+
+
+toolsSetup:
+	 mountPoint: /kb_tools
+	 toolConfigs:
+	   - name: kb-tools
+	     command:
+	       - cp
+	       - /bin/ob-tools
+	       - /kb_tools/obtools
+	     image: docker.io/apecloud/obtools
+
+
+ +

+This example copies the “/bin/ob-tools” binary from the image to “/kb_tools/obtools”. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`mountPoint`
+ +string + + +
+ + +

+Specifies the directory path in the container where the tools-related files are to be copied. +This field is typically used with an emptyDir volume to ensure a temporary, empty directory is provided at pod creation. +

+ +
+ +`toolConfigs`
+ + +[]ToolConfig + + + +
+ +(Optional) + +

+Specifies a list of settings of init containers that prepare tools for dynamic reload. +

+ +
+

+UnixSignalTrigger + +

+ +

+ +(Appears on:ReloadOptions, ReloadAction) + +

+
+ +

+UnixSignalTrigger is used to trigger a reload by sending a specific Unix signal to the process. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`signal`
+ + +SignalType + + + +
+ + +

+Specifies a valid Unix signal to be sent. +For a comprehensive list of all Unix signals, see: ../../pkg/configuration/configmap/handler.go:allUnixSignals +

+ +
+ +`processName`
+ +string + + +
+ + +

+Identifies the name of the process to which the Unix signal will be sent. +

+ +
+
+

workloads.kubeblocks.io/v1

+
+
+Resource Types: + +

+InstanceSet + +

+
+ +

+InstanceSet is the Schema for the instancesets API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`workloads.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`InstanceSet` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ + +

+Contains the metadata for the particular object, such as name, namespace, labels, and annotations. +

+Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +InstanceSetSpec + + + +
+ + +

+Defines the desired state of the state machine. It includes the configuration details for the state machine. +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the desired number of replicas of the given Template. +These replicas are instantiations of the same Template, with each having a consistent identity. +Defaults to 1 if unspecified. +

+ +
+ +`defaultTemplateOrdinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of the default template. +The Ordinals used to specify the ordinal of the instance (pod) names to be generated under the default template. +

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]}, +then the instance names generated under the default template would be +$(cluster.name)-$(component.name)-0, $(cluster.name)-$(component.name)-1 and $(cluster.name)-$(component.name)-7 +

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+Defines the minimum number of seconds a newly created pod should be ready +without any of its container crashing to be considered available. +Defaults to 0, meaning the pod will be considered available as soon as it is ready. +

+ +
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+Represents a label query over pods that should match the desired replica count indicated by the `replica` field. +It must match the labels defined in the pod template. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +

+ +
+ +`template`
+ + +Kubernetes core/v1.PodTemplateSpec + + + +
+ + +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Overrides values in default Template. +

+ +

+Instance is the fundamental unit managed by KubeBlocks. +It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. +An InstanceSet manages instances with a total count of Replicas, +and by default, all these instances are generated from the same template. +The InstanceTemplate provides a way to override values in the default template, +allowing the InstanceSet to manage instances from different templates. +

+ +

+The naming convention for instances (pods) based on the InstanceSet Name, InstanceTemplate Name, and ordinal. +The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal). +By default, the ordinal starts from 0 for each InstanceTemplate. +It is important to ensure that the Name of each InstanceTemplate is unique. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the InstanceSet. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The cluster administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`volumeClaimTemplates`
+ + +[]Kubernetes core/v1.PersistentVolumeClaim + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for each replica. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for replicas upon their creation. +The final name of each PVC is generated by appending the pod’s identifier to the name specified in volumeClaimTemplates[*].name. +

+ +
+ +`persistentVolumeClaimRetentionPolicy`
+ + +PersistentVolumeClaimRetentionPolicy + + + +
+ +(Optional) + +

+persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent +volume claims created from volumeClaimTemplates. By default, all persistent +volume claims are created as needed and retained until manually deleted. This +policy allows the lifecycle to be altered, for example by deleting persistent +volume claims when their workload is deleted, or when their pod is scaled +down. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+Controls how pods are created during initial scale up, +when replacing pods on nodes, or when scaling down. +

+ +

+The default policy is `OrderedReady`, where pods are created in increasing order and the controller waits until each pod is ready before +continuing. When scaling down, the pods are removed in the opposite order. +The alternative policy is `Parallel` which will create pods in parallel +to match the desired scale without waiting, and on scale down will delete +all pods at once. +

+ +

+Note: This field will be removed in future version. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default Concurrency is 100%. +

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +`StrictInPlace` indicates that only allows in-place upgrades. +Any attempt to modify other fields will be rejected. +
  • +
  • +`PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod. +If that fails, it will fall back to the ReCreate, where pod will be recreated. +Default value is “PreferInPlace” +
  • +
+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Provides fine-grained control over the spec update process of all instances. +

+ +
+ +`memberUpdateStrategy`
+ + +MemberUpdateStrategy + + + +
+ +(Optional) + +

+Members(Pods) update strategy. +

+
    +
  • +serial: update Members one by one that guarantee minimum component unavailable time. +
  • +
  • +parallel: force parallel +
  • +
  • +bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time. +
  • +
+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+A list of roles defined in the system. Instanceset obtains role through pods’ role label `kubeblocks.io/role`. +

+ +
+ +`membershipReconfiguration`
+ + +MembershipReconfiguration + + + +
+ +(Optional) + +

+Provides actions to do membership dynamic reconfiguration. +

+ +
+ +`templateVars`
+ +map[string]string + + +
+ +(Optional) + +

+Provides variables which are used to call Actions. +

+ +
+ +`paused`
+ +bool + + +
+ +(Optional) + +

+Indicates that the InstanceSet is paused, meaning the reconciliation of this InstanceSet object will be paused. +

+ +
+ +`configs`
+ + +[]ConfigTemplate + + + +
+ +(Optional) + +

+Describe the configs to be reconfigured. +

+ +
+ +
+ +`status`
+ + +InstanceSetStatus + + + +
+ + +

+Represents the current information about the state machine. This data may be out of date. +

+ +
+

+ConditionType +(`string` alias) +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"InstanceAvailable" +

+
+ +

+InstanceAvailable ConditionStatus will be True if all instances(pods) are in the ready condition +and continue for “MinReadySeconds” seconds. Otherwise, it will be set to False. +

+ +
+ +

+"InstanceFailure" +

+
+ +

+InstanceFailure is added in an instance set when at least one of its instances(pods) is in a `Failed` phase. +

+ +
+ +

+"InstanceReady" +

+
+ +

+InstanceReady is added in an instance set when at least one of its instances(pods) is in a Ready condition. +ConditionStatus will be True if all its instances(pods) are in a Ready condition. +Or, a NotReady reason with not ready instances encoded in the Message field will be set. +

+ +
+ +

+"InstanceUpdateRestricted" +

+
+ +

+InstanceUpdateRestricted represents a ConditionType that indicates updates to an InstanceSet are blocked(when the +PodUpdatePolicy is set to StrictInPlace but the pods cannot be updated in-place). +

+ +
+

+ConfigTemplate + +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the config. +

+ +
+ +`generation`
+ +int64 + + +
+ + +

+The generation of the config. +

+ +
+ +`reconfigure`
+ + +Action + + + +
+ +(Optional) + +

+The custom reconfigure action. +

+ +
+ +`reconfigureActionName`
+ +string + + +
+ +(Optional) + +

+The name of the custom reconfigure action. +

+ +

+An empty name indicates that the reconfigure action is the default one defined by lifecycle actions. +

+ +
+ +`parameters`
+ +map[string]string + + +
+ +(Optional) + +

+The parameters to call the reconfigure action. +

+ +
+

+InstanceConfigStatus + +

+ +

+ +(Appears on:InstanceStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the config. +

+ +
+ +`generation`
+ +int64 + + +
+ + +

+The generation of the config. +

+ +
+

+InstanceSetSpec + +

+ +

+ +(Appears on:InstanceSet) + +

+
+ +

+InstanceSetSpec defines the desired state of InstanceSet +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the desired number of replicas of the given Template. +These replicas are instantiations of the same Template, with each having a consistent identity. +Defaults to 1 if unspecified. +

+ +
+ +`defaultTemplateOrdinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of the default template. +The Ordinals used to specify the ordinal of the instance (pod) names to be generated under the default template. +

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]}, +then the instance names generated under the default template would be +$(cluster.name)-$(component.name)-0, $(cluster.name)-$(component.name)-1 and $(cluster.name)-$(component.name)-7 +

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+Defines the minimum number of seconds a newly created pod should be ready +without any of its container crashing to be considered available. +Defaults to 0, meaning the pod will be considered available as soon as it is ready. +

+ +
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+Represents a label query over pods that should match the desired replica count indicated by the `replica` field. +It must match the labels defined in the pod template. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +

+ +
+ +`template`
+ + +Kubernetes core/v1.PodTemplateSpec + + + +
+ + +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Overrides values in default Template. +

+ +

+Instance is the fundamental unit managed by KubeBlocks. +It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. +An InstanceSet manages instances with a total count of Replicas, +and by default, all these instances are generated from the same template. +The InstanceTemplate provides a way to override values in the default template, +allowing the InstanceSet to manage instances from different templates. +

+ +

+The naming convention for instances (pods) based on the InstanceSet Name, InstanceTemplate Name, and ordinal. +The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal). +By default, the ordinal starts from 0 for each InstanceTemplate. +It is important to ensure that the Name of each InstanceTemplate is unique. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the InstanceSet. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The cluster administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`volumeClaimTemplates`
+ + +[]Kubernetes core/v1.PersistentVolumeClaim + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for each replica. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for replicas upon their creation. +The final name of each PVC is generated by appending the pod’s identifier to the name specified in volumeClaimTemplates[*].name. +

+ +
+ +`persistentVolumeClaimRetentionPolicy`
+ + +PersistentVolumeClaimRetentionPolicy + + + +
+ +(Optional) + +

+persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent +volume claims created from volumeClaimTemplates. By default, all persistent +volume claims are created as needed and retained until manually deleted. This +policy allows the lifecycle to be altered, for example by deleting persistent +volume claims when their workload is deleted, or when their pod is scaled +down. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+Controls how pods are created during initial scale up, +when replacing pods on nodes, or when scaling down. +

+ +

+The default policy is `OrderedReady`, where pods are created in increasing order and the controller waits until each pod is ready before +continuing. When scaling down, the pods are removed in the opposite order. +The alternative policy is `Parallel` which will create pods in parallel +to match the desired scale without waiting, and on scale down will delete +all pods at once. +

+ +

+Note: This field will be removed in future version. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default Concurrency is 100%. +

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +`StrictInPlace` indicates that only allows in-place upgrades. +Any attempt to modify other fields will be rejected. +
  • +
  • +`PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod. +If that fails, it will fall back to the ReCreate, where pod will be recreated. +Default value is “PreferInPlace” +
  • +
+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Provides fine-grained control over the spec update process of all instances. +

+ +
+ +`memberUpdateStrategy`
+ + +MemberUpdateStrategy + + + +
+ +(Optional) + +

+Members(Pods) update strategy. +

+
    +
  • +serial: update Members one by one that guarantee minimum component unavailable time. +
  • +
  • +parallel: force parallel +
  • +
  • +bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time. +
  • +
+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+A list of roles defined in the system. Instanceset obtains role through pods’ role label `kubeblocks.io/role`. +

+ +
+ +`membershipReconfiguration`
+ + +MembershipReconfiguration + + + +
+ +(Optional) + +

+Provides actions to do membership dynamic reconfiguration. +

+ +
+ +`templateVars`
+ +map[string]string + + +
+ +(Optional) + +

+Provides variables which are used to call Actions. +

+ +
+ +`paused`
+ +bool + + +
+ +(Optional) + +

+Indicates that the InstanceSet is paused, meaning the reconciliation of this InstanceSet object will be paused. +

+ +
+ +`configs`
+ + +[]ConfigTemplate + + + +
+ +(Optional) + +

+Describe the configs to be reconfigured. +

+ +
+

+InstanceSetStatus + +

+ +

+ +(Appears on:InstanceSet) + +

+
+ +

+InstanceSetStatus defines the observed state of InstanceSet +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+observedGeneration is the most recent generation observed for this InstanceSet. It corresponds to the +InstanceSet’s generation, which is updated on mutation by the API Server. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+replicas is the number of instances created by the InstanceSet controller. +

+ +
+ +`readyReplicas`
+ +int32 + + +
+ + +

+readyReplicas is the number of instances created for this InstanceSet with a Ready Condition. +

+ +
+ +`currentReplicas`
+ +int32 + + +
+ + +

+currentReplicas is the number of instances created by the InstanceSet controller from the InstanceSet version +indicated by CurrentRevisions. +

+ +
+ +`updatedReplicas`
+ +int32 + + +
+ + +

+updatedReplicas is the number of instances created by the InstanceSet controller from the InstanceSet version +indicated by UpdateRevisions. +

+ +
+ +`currentRevision`
+ +string + + +
+ + +

+currentRevision, if not empty, indicates the version of the InstanceSet used to generate instances in the +sequence [0,currentReplicas). +

+ +
+ +`updateRevision`
+ +string + + +
+ + +

+updateRevision, if not empty, indicates the version of the InstanceSet used to generate instances in the sequence +[replicas-updatedReplicas,replicas) +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents the latest available observations of an instanceset’s current state. +Known .status.conditions.type are: “InstanceFailure”, “InstanceReady” +

+ +
+ +`availableReplicas`
+ +int32 + + +
+ +(Optional) + +

+Total number of available instances (ready for at least minReadySeconds) targeted by this InstanceSet. +

+ +
+ +`initReplicas`
+ +int32 + + +
+ +(Optional) + +

+Defines the initial number of instances when the cluster is first initialized. +This value is set to spec.Replicas at the time of object creation and remains constant thereafter. +Used only when spec.roles set. +

+ +
+ +`readyInitReplicas`
+ +int32 + + +
+ +(Optional) + +

+Represents the number of instances that have already reached the MembersStatus during the cluster initialization stage. +This value remains constant once it equals InitReplicas. +Used only when spec.roles set. +

+ +
+ +`membersStatus`
+ + +[]MemberStatus + + + +
+ +(Optional) + +

+Provides the status of each member in the cluster. +

+ +
+ +`instanceStatus`
+ + +[]InstanceStatus + + + +
+ +(Optional) + +

+Provides the status of each instance in the ITS. +

+ +
+ +`currentRevisions`
+ +map[string]string + + +
+ +(Optional) + +

+currentRevisions, if not empty, indicates the old version of the InstanceSet used to generate the underlying workload. +key is the pod name, value is the revision. +

+ +
+ +`updateRevisions`
+ +map[string]string + + +
+ +(Optional) + +

+updateRevisions, if not empty, indicates the new version of the InstanceSet used to generate the underlying workload. +key is the pod name, value is the revision. +

+ +
+ +`templatesStatus`
+ + +[]InstanceTemplateStatus + + + +
+ +(Optional) + +

+TemplatesStatus represents status of each instance generated by InstanceTemplates +

+ +
+

+InstanceStatus + +

+ +

+ +(Appears on:InstanceSetStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podName`
+ +string + + +
+ + +

+Represents the name of the pod. +

+ +
+ +`configs`
+ + +[]InstanceConfigStatus + + + +
+ +(Optional) + +

+The status of configs. +

+ +
+

+InstanceTemplate + +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+ +

+InstanceTemplate allows customization of individual replica configurations in a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name specifies the unique name of the instance Pod created using this InstanceTemplate. +This name is constructed by concatenating the Component’s name, the template’s name, and the instance’s ordinal +using the pattern: $(cluster.name)-$(component.name)-$(template.name)-$(ordinal). Ordinals start from 0. +The specified name overrides any default naming conventions or patterns. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of instances (Pods) to create from this InstanceTemplate. +This field allows setting how many replicated instances of the Component, +with the specific overrides in the InstanceTemplate, are created. +The default value is 1. A value of 0 disables instance creation. +

+ +
+ +`ordinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of this InstanceTemplate. +The Ordinals used to specify the ordinal of the instance (pod) names to be generated under this InstanceTemplate. +

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]}, +then the instance names generated under this InstanceTemplate would be +$(cluster.name)-$(component.name)-$(template.name)-0, $(cluster.name)-$(component.name)-$(template.name)-1 and +$(cluster.name)-$(component.name)-$(template.name)-7 +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs to be merged into the Pod’s existing annotations. +Existing keys will have their values overwritten, while new keys will be added to the annotations. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs that will be merged into the Pod’s existing labels. +Values for existing keys will be overwritten, and new keys will be added. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies an override for the resource requirements of the first container in the Pod. +This field allows for customizing resource allocation (CPU, memory, etc.) for the container. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Defines Env to override. +Add new or override existing envs. +

+ +
+

+InstanceTemplateStatus + +

+ +

+ +(Appears on:InstanceSetStatus) + +

+
+ +

+InstanceTemplateStatus aggregates the status of replicas for each InstanceTemplate +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name, the name of the InstanceTemplate. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Replicas is the number of replicas of the InstanceTemplate. +

+ +
+ +`readyReplicas`
+ +int32 + + +
+ +(Optional) + +

+ReadyReplicas is the number of Pods that have a Ready Condition. +

+ +
+ +`availableReplicas`
+ +int32 + + +
+ +(Optional) + +

+AvailableReplicas is the number of Pods that ready for at least minReadySeconds. +

+ +
+ +`currentReplicas`
+ +int32 + + +
+ + +

+currentReplicas is the number of instances created by the InstanceSet controller from the InstanceSet version +indicated by CurrentRevisions. +

+ +
+ +`updatedReplicas`
+ +int32 + + +
+ +(Optional) + +

+UpdatedReplicas is the number of Pods created by the InstanceSet controller from the InstanceSet version +indicated by UpdateRevisions. +

+ +
+

+MemberStatus + +

+ +

+ +(Appears on:InstanceSetStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podName`
+ +string + + +
+ + +

+Represents the name of the pod. +

+ +
+ +`role`
+ + +ReplicaRole + + + +
+ +(Optional) + +

+Defines the role of the replica in the cluster. +

+ +
+

+MemberUpdateStrategy +(`string` alias) +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+ +

+MemberUpdateStrategy defines Cluster Component update strategy. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"BestEffortParallel" +

+
+ +
+ +

+"Parallel" +

+
+ +
+ +

+"Serial" +

+
+ +
+

+MembershipReconfiguration + +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`switchover`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure for a controlled transition of a role to a new replica. +

+ +
+
+

workloads.kubeblocks.io/v1alpha1

+
+
+Resource Types: + +

+InstanceSet + +

+
+ +

+InstanceSet is the Schema for the instancesets API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`workloads.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`InstanceSet` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ + +

+Contains the metadata for the particular object, such as name, namespace, labels, and annotations. +

+Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +InstanceSetSpec + + + +
+ + +

+Defines the desired state of the state machine. It includes the configuration details for the state machine. +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the desired number of replicas of the given Template. +These replicas are instantiations of the same Template, with each having a consistent identity. +Defaults to 1 if unspecified. +

+ +
+ +`defaultTemplateOrdinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of the default template. +The Ordinals used to specify the ordinal of the instance (pod) names to be generated under the default template. +

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]}, +then the instance names generated under the default template would be +$(cluster.name)-$(component.name)-0, $(cluster.name)-$(component.name)-1 and $(cluster.name)-$(component.name)-7 +

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+Defines the minimum number of seconds a newly created pod should be ready +without any of its container crashing to be considered available. +Defaults to 0, meaning the pod will be considered available as soon as it is ready. +

+ +
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+Represents a label query over pods that should match the desired replica count indicated by the `replica` field. +It must match the labels defined in the pod template. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +

+ +
+ +`service`
+ + +Kubernetes core/v1.Service + + + +
+ +(Optional) + +

+Defines the behavior of a service spec. +Provides read-write service. +https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +

+ +

+Note: This field will be removed in future version. +

+ +
+ +`template`
+ + +Kubernetes core/v1.PodTemplateSpec + + + +
+ + +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Overrides values in default Template. +

+ +

+Instance is the fundamental unit managed by KubeBlocks. +It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. +An InstanceSet manages instances with a total count of Replicas, +and by default, all these instances are generated from the same template. +The InstanceTemplate provides a way to override values in the default template, +allowing the InstanceSet to manage instances from different templates. +

+ +

+The naming convention for instances (pods) based on the InstanceSet Name, InstanceTemplate Name, and ordinal. +The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal). +By default, the ordinal starts from 0 for each InstanceTemplate. +It is important to ensure that the Name of each InstanceTemplate is unique. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the InstanceSet. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The cluster administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`volumeClaimTemplates`
+ + +[]Kubernetes core/v1.PersistentVolumeClaim + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for each replica. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for replicas upon their creation. +The final name of each PVC is generated by appending the pod’s identifier to the name specified in volumeClaimTemplates[*].name. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+Controls how pods are created during initial scale up, +when replacing pods on nodes, or when scaling down. +

+ +

+The default policy is `OrderedReady`, where pods are created in increasing order and the controller waits until each pod is ready before +continuing. When scaling down, the pods are removed in the opposite order. +The alternative policy is `Parallel` which will create pods in parallel +to match the desired scale without waiting, and on scale down will delete +all pods at once. +

+ +

+Note: This field will be removed in future version. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default Concurrency is 100%. +

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +`StrictInPlace` indicates that only allows in-place upgrades. +Any attempt to modify other fields will be rejected. +
  • +
  • +`PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod. +If that fails, it will fall back to the ReCreate, where pod will be recreated. +Default value is “PreferInPlace” +
  • +
+ +
+ +`updateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ + +

+Indicates the StatefulSetUpdateStrategy that will be +employed to update Pods in the InstanceSet when a revision is made to +Template. +

+ +

+Note: This field will be removed in future version. +

+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+A list of roles defined in the system. +

+ +
+ +`roleProbe`
+ + +RoleProbe + + + +
+ +(Optional) + +

+Provides method to probe role. +

+ +
+ +`membershipReconfiguration`
+ + +MembershipReconfiguration + + + +
+ +(Optional) + +

+Provides actions to do membership dynamic reconfiguration. +

+ +
+ +`memberUpdateStrategy`
+ + +MemberUpdateStrategy + + + +
+ +(Optional) + +

+Members(Pods) update strategy. +

+
    +
  • +serial: update Members one by one that guarantee minimum component unavailable time. +
  • +
  • +bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time. +
  • +
  • +parallel: force parallel +
  • +
+ +
+ +`paused`
+ +bool + + +
+ +(Optional) + +

+Indicates that the InstanceSet is paused, meaning the reconciliation of this InstanceSet object will be paused. +

+ +
+ +`credential`
+ + +Credential + + + +
+ +(Optional) + +

+Credential used to connect to DB engine +

+ +
+ +
+ +`status`
+ + +InstanceSetStatus + + + +
+ + +

+Represents the current information about the state machine. This data may be out of date. +

+ +
+

+AccessMode +(`string` alias) +

+ +

+ +(Appears on:ReplicaRole) + +

+
+ +

+AccessMode defines SVC access mode enums. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"None" +

+
+ +
+ +

+"ReadWrite" +

+
+ +
+ +

+"Readonly" +

+
+ +
+

+Action + +

+ +

+ +(Appears on:MembershipReconfiguration, RoleProbe) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`image`
+ +string + + +
+ +(Optional) + +

+Refers to the utility image that contains the command which can be utilized to retrieve or process role information. +

+ +
+ +`command`
+ +[]string + + +
+ + +

+A set of instructions that will be executed within the Container to retrieve or process role information. This field is required. +

+ +
+ +`args`
+ +[]string + + +
+ +(Optional) + +

+Additional parameters used to perform specific statements. This field is optional. +

+ +
+

+ConditionType +(`string` alias) +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"InstanceAvailable" +

+
+ +

+InstanceAvailable ConditionStatus will be True if all instances(pods) are in the ready condition +and continue for “MinReadySeconds” seconds. Otherwise, it will be set to False. +

+ +
+ +

+"InstanceFailure" +

+
+ +

+InstanceFailure is added in an instance set when at least one of its instances(pods) is in a `Failed` phase. +

+ +
+ +

+"InstanceReady" +

+
+ +

+InstanceReady is added in an instance set when at least one of its instances(pods) is in a Ready condition. +ConditionStatus will be True if all its instances(pods) are in a Ready condition. +Or, a NotReady reason with not ready instances encoded in the Message field will be set. +

+ +
+ +

+"InstanceUpdateRestricted" +

+
+ +

+InstanceUpdateRestricted represents a ConditionType that indicates updates to an InstanceSet are blocked(when the +PodUpdatePolicy is set to StrictInPlace but the pods cannot be updated in-place). +

+ +
+

+Credential + +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`username`
+ + +CredentialVar + + + +
+ + +

+Defines the user’s name for the credential. +The corresponding environment variable will be KB_ITS_USERNAME. +

+ +
+ +`password`
+ + +CredentialVar + + + +
+ + +

+Represents the user’s password for the credential. +The corresponding environment variable will be KB_ITS_PASSWORD. +

+ +
+

+CredentialVar + +

+ +

+ +(Appears on:Credential) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`value`
+ +string + + +
+ +(Optional) + +

+Specifies the value of the environment variable. This field is optional and defaults to an empty string. +The value can include variable references in the format $(VAR_NAME) which will be expanded using previously defined environment variables in the container and any service environment variables. +

+ +

+If a variable cannot be resolved, the reference in the input string will remain unchanged. +Double $$ can be used to escape the $(VAR_NAME) syntax, resulting in a single $ and producing the string literal “$(VAR_NAME)”. +Escaped references will not be expanded, regardless of whether the variable exists or not. +

+ +
+ +`valueFrom`
+ + +Kubernetes core/v1.EnvVarSource + + + +
+ +(Optional) + +

+Defines the source for the environment variable’s value. This field is optional and cannot be used if the ‘Value’ field is not empty. +

+ +
+

+InstanceSetSpec + +

+ +

+ +(Appears on:InstanceSet) + +

+
+ +

+InstanceSetSpec defines the desired state of InstanceSet +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the desired number of replicas of the given Template. +These replicas are instantiations of the same Template, with each having a consistent identity. +Defaults to 1 if unspecified. +

+ +
+ +`defaultTemplateOrdinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of the default template. +The Ordinals used to specify the ordinal of the instance (pod) names to be generated under the default template. +

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]}, +then the instance names generated under the default template would be +$(cluster.name)-$(component.name)-0, $(cluster.name)-$(component.name)-1 and $(cluster.name)-$(component.name)-7 +

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+Defines the minimum number of seconds a newly created pod should be ready +without any of its container crashing to be considered available. +Defaults to 0, meaning the pod will be considered available as soon as it is ready. +

+ +
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+Represents a label query over pods that should match the desired replica count indicated by the `replica` field. +It must match the labels defined in the pod template. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +

+ +
+ +`service`
+ + +Kubernetes core/v1.Service + + + +
+ +(Optional) + +

+Defines the behavior of a service spec. +Provides read-write service. +https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +

+ +

+Note: This field will be removed in future version. +

+ +
+ +`template`
+ + +Kubernetes core/v1.PodTemplateSpec + + + +
+ + +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Overrides values in default Template. +

+ +

+Instance is the fundamental unit managed by KubeBlocks. +It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. +An InstanceSet manages instances with a total count of Replicas, +and by default, all these instances are generated from the same template. +The InstanceTemplate provides a way to override values in the default template, +allowing the InstanceSet to manage instances from different templates. +

+ +

+The naming convention for instances (pods) based on the InstanceSet Name, InstanceTemplate Name, and ordinal. +The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal). +By default, the ordinal starts from 0 for each InstanceTemplate. +It is important to ensure that the Name of each InstanceTemplate is unique. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the InstanceSet. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The cluster administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`volumeClaimTemplates`
+ + +[]Kubernetes core/v1.PersistentVolumeClaim + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for each replica. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for replicas upon their creation. +The final name of each PVC is generated by appending the pod’s identifier to the name specified in volumeClaimTemplates[*].name. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+Controls how pods are created during initial scale up, +when replacing pods on nodes, or when scaling down. +

+ +

+The default policy is `OrderedReady`, where pods are created in increasing order and the controller waits until each pod is ready before +continuing. When scaling down, the pods are removed in the opposite order. +The alternative policy is `Parallel` which will create pods in parallel +to match the desired scale without waiting, and on scale down will delete +all pods at once. +

+ +

+Note: This field will be removed in future version. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default Concurrency is 100%. +

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +`StrictInPlace` indicates that only allows in-place upgrades. +Any attempt to modify other fields will be rejected. +
  • +
  • +`PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod. +If that fails, it will fall back to the ReCreate, where pod will be recreated. +Default value is “PreferInPlace” +
  • +
+ +
+ +`updateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ + +

+Indicates the StatefulSetUpdateStrategy that will be +employed to update Pods in the InstanceSet when a revision is made to +Template. +

+ +

+Note: This field will be removed in future version. +

+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+A list of roles defined in the system. +

+ +
+ +`roleProbe`
+ + +RoleProbe + + + +
+ +(Optional) + +

+Provides method to probe role. +

+ +
+ +`membershipReconfiguration`
+ + +MembershipReconfiguration + + + +
+ +(Optional) + +

+Provides actions to do membership dynamic reconfiguration. +

+ +
+ +`memberUpdateStrategy`
+ + +MemberUpdateStrategy + + + +
+ +(Optional) + +

+Members(Pods) update strategy. +

+
    +
  • +serial: update Members one by one that guarantee minimum component unavailable time. +
  • +
  • +bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time. +
  • +
  • +parallel: force parallel +
  • +
+ +
+ +`paused`
+ +bool + + +
+ +(Optional) + +

+Indicates that the InstanceSet is paused, meaning the reconciliation of this InstanceSet object will be paused. +

+ +
+ +`credential`
+ + +Credential + + + +
+ +(Optional) + +

+Credential used to connect to DB engine +

+ +
+

+InstanceSetStatus + +

+ +

+ +(Appears on:InstanceSet) + +

+
+ +

+InstanceSetStatus defines the observed state of InstanceSet +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+observedGeneration is the most recent generation observed for this InstanceSet. It corresponds to the +InstanceSet’s generation, which is updated on mutation by the API Server. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+replicas is the number of instances created by the InstanceSet controller. +

+ +
+ +`readyReplicas`
+ +int32 + + +
+ + +

+readyReplicas is the number of instances created for this InstanceSet with a Ready Condition. +

+ +
+ +`currentReplicas`
+ +int32 + + +
+ + +

+currentReplicas is the number of instances created by the InstanceSet controller from the InstanceSet version +indicated by CurrentRevisions. +

+ +
+ +`updatedReplicas`
+ +int32 + + +
+ + +

+updatedReplicas is the number of instances created by the InstanceSet controller from the InstanceSet version +indicated by UpdateRevisions. +

+ +
+ +`currentRevision`
+ +string + + +
+ + +

+currentRevision, if not empty, indicates the version of the InstanceSet used to generate instances in the +sequence [0,currentReplicas). +

+ +
+ +`updateRevision`
+ +string + + +
+ + +

+updateRevision, if not empty, indicates the version of the InstanceSet used to generate instances in the sequence +[replicas-updatedReplicas,replicas) +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents the latest available observations of an instanceset’s current state. +Known .status.conditions.type are: “InstanceFailure”, “InstanceReady” +

+ +
+ +`availableReplicas`
+ +int32 + + +
+ +(Optional) + +

+Total number of available instances (ready for at least minReadySeconds) targeted by this InstanceSet. +

+ +
+ +`initReplicas`
+ +int32 + + +
+ +(Optional) + +

+Defines the initial number of instances when the cluster is first initialized. +This value is set to spec.Replicas at the time of object creation and remains constant thereafter. +Used only when spec.roles set. +

+ +
+ +`readyInitReplicas`
+ +int32 + + +
+ +(Optional) + +

+Represents the number of instances that have already reached the MembersStatus during the cluster initialization stage. +This value remains constant once it equals InitReplicas. +Used only when spec.roles set. +

+ +
+ +`membersStatus`
+ + +[]MemberStatus + + + +
+ +(Optional) + +

+Provides the status of each member in the cluster. +

+ +
+ +`readyWithoutPrimary`
+ +bool + + +
+ +(Optional) + +

+Indicates whether it is required for the InstanceSet to have at least one primary instance ready. +

+ +
+ +`currentRevisions`
+ +map[string]string + + +
+ +(Optional) + +

+currentRevisions, if not empty, indicates the old version of the InstanceSet used to generate the underlying workload. +key is the pod name, value is the revision. +

+ +
+ +`updateRevisions`
+ +map[string]string + + +
+ +(Optional) + +

+updateRevisions, if not empty, indicates the new version of the InstanceSet used to generate the underlying workload. +key is the pod name, value is the revision. +

+ +
+ +`templatesStatus`
+ + +[]InstanceTemplateStatus + + + +
+ +(Optional) + +

+TemplatesStatus represents status of each instance generated by InstanceTemplates +

+ +
+

+InstanceTemplate + +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+ +

+InstanceTemplate allows customization of individual replica configurations within a Component, +without altering the base component template defined in ClusterComponentSpec. +It enables the application of distinct settings to specific instances (replicas), +providing flexibility while maintaining a common configuration baseline. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name specifies the unique name of the instance Pod created using this InstanceTemplate. +This name is constructed by concatenating the component’s name, the template’s name, and the instance’s ordinal +using the pattern: $(cluster.name)-$(component.name)-$(template.name)-$(ordinal). Ordinals start from 0. +The specified name overrides any default naming conventions or patterns. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of instances (Pods) to create from this InstanceTemplate. +This field allows setting how many replicated instances of the component, +with the specific overrides in the InstanceTemplate, are created. +The default value is 1. A value of 0 disables instance creation. +

+ +
+ +`ordinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of this InstanceTemplate. +The Ordinals used to specify the ordinal of the instance (pod) names to be generated under this InstanceTemplate. +

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]}, +then the instance names generated under this InstanceTemplate would be +$(cluster.name)-$(component.name)-$(template.name)-0, $(cluster.name)-$(component.name)-$(template.name)-1 and +$(cluster.name)-$(component.name)-$(template.name)-7 +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs to be merged into the Pod’s existing annotations. +Existing keys will have their values overwritten, while new keys will be added to the annotations. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs that will be merged into the Pod’s existing labels. +Values for existing keys will be overwritten, and new keys will be added. +

+ +
+ +`image`
+ +string + + +
+ +(Optional) + +

+Specifies an override for the first container’s image in the pod. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies an override for the resource requirements of the first container in the Pod. +This field allows for customizing resource allocation (CPU, memory, etc.) for the container. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Defines Env to override. +Add new or override existing envs. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+Defines Volumes to override. +Add new or override existing volumes. +

+ +
+ +`volumeMounts`
+ + +[]Kubernetes core/v1.VolumeMount + + + +
+ +(Optional) + +

+Defines VolumeMounts to override. +Add new or override existing volume mounts of the first container in the pod. +

+ +
+ +`volumeClaimTemplates`
+ + +[]Kubernetes core/v1.PersistentVolumeClaim + + + +
+ +(Optional) + +

+Defines VolumeClaimTemplates to override. +Add new or override existing volume claim templates. +

+ +
+

+InstanceTemplateStatus + +

+ +

+ +(Appears on:InstanceSetStatus) + +

+
+ +

+InstanceTemplateStatus aggregates the status of replicas for each InstanceTemplate +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name, the name of the InstanceTemplate. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Replicas is the number of replicas of the InstanceTemplate. +

+ +
+ +`readyReplicas`
+ +int32 + + +
+ +(Optional) + +

+ReadyReplicas is the number of Pods that have a Ready Condition. +

+ +
+ +`availableReplicas`
+ +int32 + + +
+ +(Optional) + +

+AvailableReplicas is the number of Pods that have been ready for at least minReadySeconds. +

+ +
+ +`currentReplicas`
+ +int32 + + +
+ + +

+currentReplicas is the number of instances created by the InstanceSet controller from the InstanceSet version +indicated by CurrentRevisions. +

+ +
+ +`updatedReplicas`
+ +int32 + + +
+ +(Optional) + +

+UpdatedReplicas is the number of Pods created by the InstanceSet controller from the InstanceSet version +indicated by UpdateRevisions. +

+ +
+

+InstanceUpdateStrategy + +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+ +

+InstanceUpdateStrategy indicates the strategy that the InstanceSet +controller will use to perform updates. It includes any additional parameters +necessary to perform the update for the indicated strategy. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`partition`
+ +int32 + + +
+ +(Optional) + +

+Partition indicates the number of pods that should be updated during a rolling update. +The remaining pods will remain untouched. This is helpful in defining how many pods +should participate in the update process. The update process will follow the order +of pod names in descending lexicographical (dictionary) order. The default value is +Replicas (i.e., update all pods). +

+ +
+ +`maxUnavailable`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+The maximum number of pods that can be unavailable during the update. +Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). +Absolute number is calculated from percentage by rounding up. This can not be 0. +Defaults to 1. The field applies to all pods. That means if there is any unavailable pod, +it will be counted towards MaxUnavailable. +

+ +
+ +`memberUpdateStrategy`
+ + +MemberUpdateStrategy + + + +
+ +(Optional) + +

+Members(Pods) update strategy. +

+
    +
  • +serial: update Members one by one that guarantee minimum component unavailable time. +
  • +
  • +bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time. +
  • +
  • +parallel: force parallel +
  • +
+ +
+

+MemberStatus + +

+ +

+ +(Appears on:ClusterComponentStatus, InstanceSetStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podName`
+ +string + + +
+ + +

+Represents the name of the pod. +

+ +
+ +`role`
+ + +ReplicaRole + + + +
+ +(Optional) + +

+Defines the role of the replica in the cluster. +

+ +
+

+MemberUpdateStrategy +(`string` alias) +

+ +

+ +(Appears on:RSMSpec, InstanceSetSpec, InstanceUpdateStrategy) + +

+
+ +

+MemberUpdateStrategy defines Cluster Component update strategy. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"BestEffortParallel" +

+
+ +
+ +

+"Parallel" +

+
+ +
+ +

+"Serial" +

+
+ +
+

+MembershipReconfiguration + +

+ +

+ +(Appears on:RSMSpec, InstanceSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`switchoverAction`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the environment variables that can be used in all following Actions: +- KB_ITS_USERNAME: Represents the username part of the credential +- KB_ITS_PASSWORD: Represents the password part of the credential +- KB_ITS_LEADER_HOST: Represents the leader host +- KB_ITS_TARGET_HOST: Represents the target host +- KB_ITS_SERVICE_PORT: Represents the service port +

+ +

+Defines the action to perform a switchover. +If the Image is not configured, the latest BusyBox image will be used. +

+ +
+ +`memberJoinAction`
+ + +Action + + + +
+ +(Optional) + +

+Defines the action to add a member. +If the Image is not configured, the Image from the previous non-nil action will be used. +

+ +
+ +`memberLeaveAction`
+ + +Action + + + +
+ +(Optional) + +

+Defines the action to remove a member. +If the Image is not configured, the Image from the previous non-nil action will be used. +

+ +
+ +`logSyncAction`
+ + +Action + + + +
+ +(Optional) + +

+Defines the action to trigger the new member to start log syncing. +If the Image is not configured, the Image from the previous non-nil action will be used. +

+ +
+ +`promoteAction`
+ + +Action + + + +
+ +(Optional) + +

+Defines the action to inform the cluster that the new member can join voting now. +If the Image is not configured, the Image from the previous non-nil action will be used. +

+ +
+

+Ordinals + +

+ +

+ +(Appears on:InstanceSetSpec, InstanceTemplate) + +

+
+ +

+Ordinals represents a combination of continuous segments and individual values. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ranges`
+ + +[]Range + + + +
+ + +
+ +`discrete`
+ +[]int32 + + +
+ + +
+

+PodUpdatePolicyType +(`string` alias) +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec, InstanceSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"PreferInPlace" +

+
+ +

+PreferInPlacePodUpdatePolicyType indicates that we will first attempt an in-place upgrade of the Pod. +If that fails, it will fall back to the ReCreate, where pod will be recreated. +

+ +
+ +

+"StrictInPlace" +

+
+ +

+StrictInPlacePodUpdatePolicyType indicates that only allows in-place upgrades. +Any attempt to modify other fields will be rejected. +

+ +
+

+Range + +

+ +

+ +(Appears on:Ordinals) + +

+
+ +

+Range represents a range with a start and an end value. +It is used to define a continuous segment. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`start`
+ +int32 + + +
+ + +
+ +`end`
+ +int32 + + +
+ + +
+

+ReplicaRole + +

+ +

+ +(Appears on:RSMSpec, InstanceSetSpec, MemberStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the role name of the replica. +

+ +
+ +`accessMode`
+ + +AccessMode + + + +
+ + +

+Specifies the service capabilities of this member. +

+ +
+ +`canVote`
+ +bool + + +
+ +(Optional) + +

+Indicates if this member has voting rights. +

+ +
+ +`isLeader`
+ +bool + + +
+ +(Optional) + +

+Determines if this member is the leader. +

+ +
+

+RoleProbe + +

+ +

+ +(Appears on:RSMSpec, InstanceSetSpec) + +

+
+ +

+RoleProbe defines how to observe role +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`builtinHandlerName`
+ +string + + +
+ +(Optional) + +

+Specifies the builtin handler name to use to probe the role of the main container. +Available handlers include: mysql, postgres, mongodb, redis, etcd, kafka. +Use CustomHandler to define a custom role probe function if none of the built-in handlers meet the requirement. +

+ +
+ +`customHandler`
+ + +[]Action + + + +
+ +(Optional) + +

+Defines a custom method for role probing. +Actions defined here are executed in series. +Upon completion of all actions, the final output should be a single string representing the role name defined in spec.Roles. +The latest BusyBox image will be used if Image is not configured. +Environment variables can be used in Command: +- v_KB_ITS_LASTSTDOUT: stdout from the last action, watch for ‘v’ prefix +- KB_ITS_USERNAME: username part of the credential +- KB_ITS_PASSWORD: password part of the credential +

+ +
+ +`initialDelaySeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds to wait after the container has started before initiating role probing. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds after which the probe times out. +

+ +
+ +`periodSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the frequency (in seconds) of probe execution. +

+ +
+ +`successThreshold`
+ +int32 + + +
+ +(Optional) + +

+Specifies the minimum number of consecutive successes for the probe to be considered successful after having failed. +

+ +
+ +`failureThreshold`
+ +int32 + + +
+ +(Optional) + +

+Specifies the minimum number of consecutive failures for the probe to be considered failed after having succeeded. +

+ +
+ +`roleUpdateMechanism`
+ + +RoleUpdateMechanism + + + +
+ +(Optional) + +

+Specifies the method for updating the pod role label. +

+ +
+

+RoleUpdateMechanism +(`string` alias) +

+ +

+ +(Appears on:RoleProbe) + +

+
+ +

+RoleUpdateMechanism defines how the pod role label is updated. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"DirectAPIServerEventUpdate" +

+
+ +
+ +

+"ReadinessProbeEventUpdate" +

+
+ +
+

+SchedulingPolicy + +

+ +

+ +(Appears on:InstanceTemplate) + +

+
+ +

+SchedulingPolicy defines the scheduling policy. +Deprecated: Unify with apps/v1alpha1.SchedulingPolicy +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`schedulerName`
+ +string + + +
+ +(Optional) + +

+If specified, the Pod will be dispatched by specified scheduler. +If not specified, the Pod will be dispatched by default scheduler. +

+ +
+ +`nodeSelector`
+ +map[string]string + + +
+ +(Optional) + +

+NodeSelector is a selector which must be true for the Pod to fit on a node. +Selector which must match a node’s labels for the Pod to be scheduled on that node. +More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +

+ +
+ +`nodeName`
+ +string + + +
+ +(Optional) + +

+NodeName is a request to schedule this Pod onto a specific node. If it is non-empty, +the scheduler simply schedules this Pod onto that node, assuming that it fits resource +requirements. +

+ +
+ +`affinity`
+ + +Kubernetes core/v1.Affinity + + + +
+ +(Optional) + +

+Specifies a group of affinity scheduling rules of the Cluster, including NodeAffinity, PodAffinity, and PodAntiAffinity. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Allows Pods to be scheduled onto nodes with matching taints. +Each toleration in the array allows the Pod to tolerate node taints based on +specified `key`, `value`, `effect`, and `operator`. +

+
    +
  • +The `key`, `value`, and `effect` identify the taint that the toleration matches. +
  • +
  • +The `operator` determines how the toleration matches the taint. +
  • +
+ +

+Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. +

+ +
+ +`topologySpreadConstraints`
+ + +[]Kubernetes core/v1.TopologySpreadConstraint + + + +
+ +(Optional) + +

+TopologySpreadConstraints describes how a group of Pods ought to spread across topology +domains. Scheduler will schedule Pods in a way which abides by the constraints. +All topologySpreadConstraints are ANDed. +

+ +
+
+ +

+ +Generated with `gen-crd-api-reference-docs` + +

\ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/references/api-reference/dataprotection.mdx b/docs/en/release-1_0_1/user_docs/references/api-reference/dataprotection.mdx new file mode 100644 index 00000000..cac54810 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/api-reference/dataprotection.mdx @@ -0,0 +1,11576 @@ +--- +title: Dataprotection API Reference +description: Dataprotection API Reference +keywords: [dataprotection, api] +sidebar_position: 4 +sidebar_label: Dataprotection +--- +
+ +

+Packages: +

+ +

dataprotection.kubeblocks.io/v1alpha1

+
+
+Resource Types: + +

+ActionSet + +

+
+ +

+ActionSet is the Schema for the actionsets API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ActionSet` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ActionSetSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`backupType`
+ + +BackupType + + + +
+ + +

+Specifies the backup type. Supported values include: +

+
    +
  • +`Full` for a full backup. +
  • +
  • +`Incremental` back up data that have changed since the last backup (either full or incremental). +
  • +
  • +`Differential` back up data that has changed since the last full backup. +
  • +
  • +`Continuous` back up transaction logs continuously, such as MySQL binlog, PostgreSQL WAL, etc. +
  • +
  • +`Selective` back up data more precisely, use custom parameters, such as specific databases or tables. +
  • +
+ +

+Continuous backup is essential for implementing Point-in-Time Recovery (PITR). +

+ +
+ +`parametersSchema`
+ + +ActionSetParametersSchema + + + +
+ +(Optional) + +

+Specifies the schema of parameters in backups and restores before their usage. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Specifies a list of environment variables to be set in the container. +

+ +
+ +`envFrom`
+ + +[]Kubernetes core/v1.EnvFromSource + + + +
+ +(Optional) + +

+Specifies a list of sources to populate environment variables in the container. +The keys within a source must be a C_IDENTIFIER. Any invalid keys will be +reported as an event when the container starts. If a key exists in multiple +sources, the value from the last source will take precedence. Any values +defined by an Env with a duplicate key will take precedence. +

+ +

+This field cannot be updated. +

+ +
+ +`backup`
+ + +BackupActionSpec + + + +
+ +(Optional) + +

+Specifies the backup action. +

+ +
+ +`restore`
+ + +RestoreActionSpec + + + +
+ +(Optional) + +

+Specifies the restore action. +

+ +
+ +
+ +`status`
+ + +ActionSetStatus + + + +
+ + +
+

+Backup + +

+
+ +

+Backup is the Schema for the backups API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Backup` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +BackupSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`backupPolicyName`
+ +string + + +
+ + +

+Specifies the backup policy to be applied for this backup. +

+ +
+ +`backupMethod`
+ +string + + +
+ + +

+Specifies the backup method name that is defined in the backup policy. +

+ +
+ +`deletionPolicy`
+ + +BackupDeletionPolicy + + + +
+ + +

+Determines whether the backup contents stored in the backup repository +should be deleted when the backup custom resource(CR) is deleted. +Supported values are `Retain` and `Delete`. +

+
    +
  • +`Retain` means that the backup content and its physical snapshot on backup repository are kept. +
  • +
  • +`Delete` means that the backup content and its physical snapshot on backup repository are deleted. +
  • +
+ +

+the backup CR but retaining the backup contents in the backup repository. +The current implementation only prevents accidental deletion of backup data. +

+ +
+ +`retentionPeriod`
+ + +RetentionPeriod + + + +
+ +(Optional) + +

+Determines a duration up to which the backup should be kept. +Controller will remove all backups that are older than the RetentionPeriod. +If not set, the backup will be kept forever. +For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. +Sample duration format: +

+
    +
  • +years: 2y +
  • +
  • +months: 6mo +
  • +
  • +days: 30d +
  • +
  • +hours: 12h +
  • +
  • +minutes: 30m +
  • +
+ +

+You can also combine the above durations. For example: 30d12h30m. +

+ +
+ +`parentBackupName`
+ +string + + +
+ +(Optional) + +

+Determines the parent backup name for incremental or differential backup. +

+ +
+ +`parameters`
+ + +[]ParameterPair + + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+ +
+ +`status`
+ + +BackupStatus + + + +
+ + +
+

+BackupPolicy + +

+
+ +

+BackupPolicy is the Schema for the backuppolicies API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`BackupPolicy` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +BackupPolicySpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`backupRepoName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of BackupRepo where the backup data will be stored. +If not set, data will be stored in the default backup repository. +

+ +
+ +`pathPrefix`
+ +string + + +
+ +(Optional) + +

+Specifies the directory inside the backup repository to store the backup. +This path is relative to the path of the backup repository. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of retries before marking the backup as failed. +

+ +
+ +`target`
+ + +BackupTarget + + + +
+ + +

+Specifies the target information to back up, such as the target pod, the +cluster connection credential. +

+ +
+ +`targets`
+ + +[]BackupTarget + + + +
+ + +

+Specifies multiple target information for backup operations. This includes details +such as the target pod and cluster connection credentials. All specified targets +will be backed up collectively. +optional +

+ +
+ +`backupMethods`
+ + +[]BackupMethod + + + +
+ + +

+Defines the backup methods. +

+ +
+ +`useKopia`
+ +bool + + +
+ +(Optional) + +

+Specifies whether backup data should be stored in a Kopia repository. +

+ +

+Data within the Kopia repository is both compressed and encrypted. Furthermore, +data deduplication is implemented across various backups of the same cluster. +This approach significantly reduces the actual storage usage, particularly +for clusters with a low update frequency. +

+ +

+NOTE: This feature should NOT be enabled when using KubeBlocks Community Edition, otherwise the backup will not be processed. +

+ +
+ +`encryptionConfig`
+ + +EncryptionConfig + + + +
+ +(Optional) + +

+Specifies the parameters for encrypting backup data. +Encryption will be disabled if the field is not set. +

+ +
+ +`retentionPolicy`
+ + +BackupPolicyRetentionPolicy + + + +
+ +(Optional) + +

+Specifies the backup retention policy. This has a precedence over `backup.spec.retentionPeriod`. +

+ +
+ +
+ +`status`
+ + +BackupPolicyStatus + + + +
+ + +
+

+BackupRepo + +

+
+ +

+BackupRepo is a repository for storing backup data. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`BackupRepo` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +BackupRepoSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`storageProviderRef`
+ +string + + +
+ + +

+Specifies the name of the `StorageProvider` used by this backup repository. +

+ +
+ +`accessMethod`
+ + +AccessMethod + + + +
+ +(Optional) + +

+Specifies the access method of the backup repository. +

+ +
+ +`volumeCapacity`
+ + +Kubernetes resource.Quantity + + + +
+ +(Optional) + +

+Specifies the capacity of the PVC created by this backup repository. +

+ +
+ +`pvReclaimPolicy`
+ + +Kubernetes core/v1.PersistentVolumeReclaimPolicy + + + +
+ + +

+Specifies reclaim policy of the PV created by this backup repository. +

+ +
+ +`config`
+ +map[string]string + + +
+ +(Optional) + +

+Stores the non-secret configuration parameters for the `StorageProvider`. +

+ +
+ +`credential`
+ + +Kubernetes core/v1.SecretReference + + + +
+ +(Optional) + +

+References to the secret that holds the credentials for the `StorageProvider`. +

+ +
+ +`pathPrefix`
+ +string + + +
+ +(Optional) + +

+Specifies the prefix of the path for storing backup data. +

+ +
+ +
+ +`status`
+ + +BackupRepoStatus + + + +
+ + +
+

+BackupSchedule + +

+
+ +

+BackupSchedule is the Schema for the backupschedules API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`BackupSchedule` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +BackupScheduleSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + +
+ +`backupPolicyName`
+ +string + + +
+ + +

+Specifies the backupPolicy to be applied for the `schedules`. +

+ +
+ +`startingDeadlineMinutes`
+ +int64 + + +
+ +(Optional) + +

+Defines the deadline in minutes for starting the backup workload if it +misses its scheduled time for any reason. +

+ +
+ +`schedules`
+ + +[]SchedulePolicy + + + +
+ + +

+Defines the list of backup schedules. +

+ +
+ +
+ +`status`
+ + +BackupScheduleStatus + + + +
+ + +
+

+Restore + +

+
+ +

+Restore is the Schema for the restores API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Restore` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +RestoreSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`backup`
+ + +BackupRef + + + +
+ + +

+Specifies the backup to be restored. The restore behavior is based on the backup type: +

+
    +
  1. +Full: the full backup will be restored directly. +
  2. +
  3. +Incremental: backups will be restored sequentially, starting from the most recent full backup that this incremental backup is based on. +
  4. +
  5. +Differential: backups will be restored sequentially, starting from the parent backup of the differential backup. +
  6. +
  7. +Continuous: the most recent full backup before this point in time will be found and restored, followed by the continuous backups taken after it. +
  8. +
+ +
+ +`restoreTime`
+ +string + + +
+ +(Optional) + +

+Specifies the point in time for restoring. +

+ +
+ +`resources`
+ + +RestoreKubeResources + + + +
+ +(Optional) + +

+Restores the specified resources of Kubernetes. +

+ +
+ +`prepareDataConfig`
+ + +PrepareDataConfig + + + +
+ +(Optional) + +

+Configuration for the action of “prepareData” phase, including the persistent volume claims +that need to be restored and scheduling strategy of temporary recovery pod. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the service account name needed for recovery pod. +

+ +
+ +`readyConfig`
+ + +ReadyConfig + + + +
+ +(Optional) + +

+Configuration for the action of “postReady” phase. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to set in the container for restore. These will be +merged with the env of Backup and ActionSet. +

+ +

+The priority of merging is as follows: `Restore env > Backup env > ActionSet env`. +

+ +
+ +`containerResources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the required resources of restore job’s container. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of retries before marking the restore failed. +

+ +
+ +`parameters`
+ + +[]ParameterPair + + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+ +
+ +`status`
+ + +RestoreStatus + + + +
+ + +
+

+StorageProvider + +

+
+ +

+StorageProvider comprises specifications that provide guidance on accessing remote storage. +Currently the supported access methods are via a dedicated CSI driver or the `datasafed` tool. +In case of CSI driver, the specification expounds on provisioning PVCs for that driver. +As for the `datasafed` tool, the specification provides insights on generating the necessary +configuration file. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`StorageProvider` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +StorageProviderSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`csiDriverName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the CSI driver used to access remote storage. +This field can be empty, it indicates that the storage is not accessible via CSI. +

+ +
+ +`csiDriverSecretTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template that used to render and generate `k8s.io/api/core/v1.Secret` +resources for a specific CSI driver. +For example, `accessKey` and `secretKey` needed by CSI-S3 are stored in this +`Secret` resource. +

+ +
+ +`storageClassTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template utilized to render and generate `kubernetes.storage.k8s.io.v1.StorageClass` +resources. The `StorageClass` created by this template is aimed at using the CSI driver. +

+ +
+ +`persistentVolumeClaimTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template that renders and generates `k8s.io/api/core/v1.PersistentVolumeClaim` +resources. This PVC can reference the `StorageClass` created from `storageClassTemplate`, +allowing Pods to access remote storage by mounting the PVC. +

+ +
+ +`datasafedConfigTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template used to render and generate `k8s.io/api/core/v1.Secret`. +This `Secret` involves the configuration details required by the `datasafed` tool +to access remote storage. For example, the `Secret` should contain `endpoint`, +`bucket`, `region`, `accessKey`, `secretKey`, or something else for S3 storage. +This field can be empty, it means this kind of storage is not accessible via +the `datasafed` tool. +

+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Describes the parameters required for storage. +The parameters defined here can be referenced in the above templates, +and `kbcli` uses this definition for dynamic command-line parameter parsing. +

+ +
+ +
+ +`status`
+ + +StorageProviderStatus + + + +
+ + +
+

+AccessMethod +(`string` alias) +

+ +

+ +(Appears on:BackupRepoSpec) + +

+
+ +

+AccessMethod represents an enumeration type that outlines +how the `BackupRepo` can be accessed. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Mount" +

+
+ +

+AccessMethodMount suggests that the storage is mounted locally +which allows for remote files to be accessed akin to local ones. +

+ +
+ +

+"Tool" +

+
+ +

+AccessMethodTool indicates the utilization of a command-line +tool for accessing the storage. +

+ +
+

+ActionErrorMode +(`string` alias) +

+ +

+ +(Appears on:ExecActionSpec, JobActionSpec) + +

+
+ +

+ActionErrorMode defines how to handle an error from an action. +Currently, only the Fail mode is supported, but the Continue mode will be supported in the future. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Continue" +

+
+ +

+ActionErrorModeContinue signifies that an error from an action is acceptable and can be ignored. +

+ +
+ +

+"Fail" +

+
+ +

+ActionErrorModeFail signifies that an error from an action is problematic and should be treated as a failure. +

+ +
+

+ActionPhase +(`string` alias) +

+ +

+ +(Appears on:ActionStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Completed" +

+
+ +

+ActionPhaseCompleted means the action has run successfully without errors. +

+ +
+ +

+"Failed" +

+
+ +

+ActionPhaseFailed means the action ran but encountered an error that +prevented it from completing successfully. +

+ +
+ +

+"New" +

+
+ +

+ActionPhaseNew means the action has been created but not yet processed by +the BackupController. +

+ +
+ +

+"Running" +

+
+ +

+ActionPhaseRunning means the action is currently executing. +

+ +
+

+ActionSetParametersSchema + +

+ +

+ +(Appears on:ActionSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`openAPIV3Schema`
+ + +Kubernetes api extensions v1.JSONSchemaProps + + + +
+ +(Optional) + +

+Defines the schema for parameters using the OpenAPI v3. +The supported property types include: +- string +- number +- integer +- array: Note that only items of string type are supported. +

+ +
+

+ActionSetSpec + +

+ +

+ +(Appears on:ActionSet) + +

+
+ +

+ActionSetSpec defines the desired state of ActionSet +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupType`
+ + +BackupType + + + +
+ + +

+Specifies the backup type. Supported values include: +

+
    +
  • +`Full` for a full backup. +
  • +
  • +`Incremental` back up data that have changed since the last backup (either full or incremental). +
  • +
  • +`Differential` back up data that has changed since the last full backup. +
  • +
  • +`Continuous` back up transaction logs continuously, such as MySQL binlog, PostgreSQL WAL, etc. +
  • +
  • +`Selective` back up data more precisely, use custom parameters, such as specific databases or tables. +
  • +
+ +

+Continuous backup is essential for implementing Point-in-Time Recovery (PITR). +

+ +
+ +`parametersSchema`
+ + +ActionSetParametersSchema + + + +
+ +(Optional) + +

+Specifies the schema of parameters in backups and restores before their usage. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Specifies a list of environment variables to be set in the container. +

+ +
+ +`envFrom`
+ + +[]Kubernetes core/v1.EnvFromSource + + + +
+ +(Optional) + +

+Specifies a list of sources to populate environment variables in the container. +The keys within a source must be a C_IDENTIFIER. Any invalid keys will be +reported as an event when the container starts. If a key exists in multiple +sources, the value from the last source will take precedence. Any values +defined by an Env with a duplicate key will take precedence. +

+ +

+This field cannot be updated. +

+ +
+ +`backup`
+ + +BackupActionSpec + + + +
+ +(Optional) + +

+Specifies the backup action. +

+ +
+ +`restore`
+ + +RestoreActionSpec + + + +
+ +(Optional) + +

+Specifies the restore action. +

+ +
+

+ActionSetStatus + +

+ +

+ +(Appears on:ActionSet) + +

+
+ +

+ActionSetStatus defines the observed state of ActionSet +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Indicates the phase of the ActionSet. This can be either ‘Available’ or ‘Unavailable’. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a human-readable explanation detailing the reason for the current phase of the ActionSet. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the generation number that has been observed by the controller. +

+ +
+

+ActionSpec + +

+ +

+ +(Appears on:BackupActionSpec, RestoreActionSpec) + +

+
+ +

+ActionSpec defines an action that should be executed. Only one of the fields may be set. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`exec`
+ + +ExecActionSpec + + + +
+ +(Optional) + +

+Specifies that the action should be executed using the pod’s exec API within a container. +

+ +
+ +`job`
+ + +JobActionSpec + + + +
+ +(Optional) + +

+Specifies that the action should be executed by a Kubernetes Job. +

+ +
+

+ActionStatus + +

+ +

+ +(Appears on:BackupStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+The name of the action. +

+ +
+ +`targetPodName`
+ +string + + +
+ + +

+Records the target pod name which has been backed up. +

+ +
+ +`phase`
+ + +ActionPhase + + + +
+ +(Optional) + +

+The current phase of the action. +

+ +
+ +`startTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time an action was started. +

+ +
+ +`completionTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time an action was completed. +

+ +
+ +`failureReason`
+ +string + + +
+ +(Optional) + +

+An error that caused the action to fail. +

+ +
+ +`actionType`
+ + +ActionType + + + +
+ +(Optional) + +

+The type of the action. +

+ +
+ +`availableReplicas`
+ +int32 + + +
+ +(Optional) + +

+Available replicas for statefulSet action. +

+ +
+ +`objectRef`
+ + +Kubernetes core/v1.ObjectReference + + + +
+ +(Optional) + +

+The object reference for the action. +

+ +
+ +`totalSize`
+ +string + + +
+ +(Optional) + +

+The total size of backed up data size. +A string with capacity units in the format of “1Gi”, “1Mi”, “1Ki”. +If no capacity unit is specified, it is assumed to be in bytes. +

+ +
+ +`timeRange`
+ + +BackupTimeRange + + + +
+ +(Optional) + +

+Records the time range of backed up data, for PITR, this is the time +range of recoverable data. +

+ +
+ +`volumeSnapshots`
+ + +[]VolumeSnapshotStatus + + + +
+ +(Optional) + +

+Records the volume snapshot status for the action. +

+ +
+

+ActionType +(`string` alias) +

+ +

+ +(Appears on:ActionStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Job" +

+
+ +
+ +

+"" +

+
+ +
+ +

+"StatefulSet" +

+
+ +
+

+BackupActionSpec + +

+ +

+ +(Appears on:ActionSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupData`
+ + +BackupDataActionSpec + + + +
+ + +

+Represents the action to be performed for backing up data. +

+ +
+ +`preBackup`
+ + +[]ActionSpec + + + +
+ +(Optional) + +

+Represents a set of actions that should be executed before the backup process begins. +

+ +
+ +`postBackup`
+ + +[]ActionSpec + + + +
+ +(Optional) + +

+Represents a set of actions that should be executed after the backup process has completed. +

+ +
+ +`preDelete`
+ + +BaseJobActionSpec + + + +
+ +(Optional) + +

+Represents a custom deletion action that can be executed before the built-in deletion action. +Note: The preDelete action job will ignore the env/envFrom. +

+ +
+ +`withParameters`
+ +[]string + + +
+ +(Optional) + +

+Specifies the parameters used by the backup action +

+ +
+

+BackupDataActionSpec + +

+ +

+ +(Appears on:BackupActionSpec) + +

+
+ +

+BackupDataActionSpec defines how to back up data. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`JobActionSpec`
+ + +JobActionSpec + + + +
+ + +

+ +(Members of `JobActionSpec` are embedded into this type.) + +

+ +
+ +`syncProgress`
+ + +SyncProgress + + + +
+ +(Optional) + +

+Determines if the backup progress should be synchronized and the interval +for synchronization in seconds. +

+ +
+

+BackupDeletionPolicy +(`string` alias) +

+ +

+ +(Appears on:BackupSpec) + +

+
+ +

+BackupDeletionPolicy describes the policy for end-of-life maintenance of backup content. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Delete" +

+
+ +
+ +

+"Retain" +

+
+ +
+

+BackupMethod + +

+ +

+ +(Appears on:BackupPolicySpec, BackupStatus) + +

+
+ +

+BackupMethod defines the backup method. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of backup method. +

+ +
+ +`compatibleMethod`
+ +string + + +
+ +(Optional) + +

+The name of the compatible full backup method, used by incremental backups. +

+ +
+ +`snapshotVolumes`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to take snapshots of persistent volumes. If true, +the ActionSetName is not required, the controller will use the CSI volume +snapshotter to create the snapshot. +

+ +
+ +`actionSetName`
+ +string + + +
+ +(Optional) + +

+Refers to the ActionSet object that defines the backup actions. +For volume snapshot backup, the actionSet is not required, the controller +will use the CSI volume snapshotter to create the snapshot. +

+ +
+ +`targetVolumes`
+ + +TargetVolumeInfo + + + +
+ +(Optional) + +

+Specifies which volumes from the target should be mounted in the backup workload. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Specifies the environment variables for the backup workload. +

+ +
+ +`runtimeSettings`
+ + +RuntimeSettings + + + +
+ +(Optional) + +

+Specifies runtime settings for the backup workload container. +

+ +
+ +`target`
+ + +BackupTarget + + + +
+ +(Optional) + +

+Specifies the target information to back up, it will override the target in backup policy. +

+ +
+ +`targets`
+ + +[]BackupTarget + + + +
+ + +

+Specifies multiple target information for backup operations. This includes details +such as the target pod and cluster connection credentials. All specified targets +will be backed up collectively. +

+ +
+

+BackupMethodTPL + +

+ +

+ +(Appears on:BackupPolicyTemplateSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of backup method. +

+ +
+ +`compatibleMethod`
+ +string + + +
+ +(Optional) + +

+The name of the compatible full backup method, used by incremental backups. +

+ +
+ +`snapshotVolumes`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to take snapshots of persistent volumes. If true, +the ActionSetName is not required, the controller will use the CSI volume +snapshotter to create the snapshot. +

+ +
+ +`actionSetName`
+ +string + + +
+ +(Optional) + +

+Refers to the ActionSet object that defines the backup actions. +For volume snapshot backup, the actionSet is not required, the controller +will use the CSI volume snapshotter to create the snapshot. +

+ +
+ +`targetVolumes`
+ + +TargetVolumeInfo + + + +
+ +(Optional) + +

+Specifies which volumes from the target should be mounted in the backup workload. +

+ +
+ +`env`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Specifies the environment variables for the backup workload. +

+ +
+ +`runtimeSettings`
+ + +RuntimeSettings + + + +
+ +(Optional) + +

+Specifies runtime settings for the backup workload container. +

+ +
+ +`target`
+ + +TargetInstance + + + +
+ +(Optional) + +

+If set, specifies the method for selecting the replica to be backed up using the criteria defined here. +If this field is not set, the selection method specified in `backupPolicy.target` is used. +

+ +

+This field provides a way to override the global `backupPolicy.target` setting for specific BackupMethod. +

+ +
+

+BackupPhase +(`string` alias) +

+ +

+ +(Appears on:BackupStatus) + +

+
+ +

+BackupPhase describes the lifecycle phase of a Backup. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Completed" +

+
+ +

+BackupPhaseCompleted means the backup has run successfully without errors. +

+ +
+ +

+"Deleting" +

+
+ +

+BackupPhaseDeleting means the backup and all its associated data are being deleted. +

+ +
+ +

+"Failed" +

+
+ +

+BackupPhaseFailed means the backup ran but encountered an error that +prevented it from completing successfully. +

+ +
+ +

+"New" +

+
+ +

+BackupPhaseNew means the backup has been created but not yet processed by +the BackupController. +

+ +
+ +

+"Running" +

+
+ +

+BackupPhaseRunning means the backup is currently executing. +

+ +
+

+BackupPolicyPhase +(`string` alias) +

+
+ +

+BackupPolicyPhase defines phases for BackupPolicy. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +
+ +

+"Failed" +

+
+ +
+

+BackupPolicyRetentionPolicy +(`string` alias) +

+ +

+ +(Appears on:BackupPolicySpec, BackupPolicyTemplateSpec) + +

+
+ +

+BackupPolicyRetentionPolicy defines the backup retention policy. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"" +

+
+ +

+BackupPolicyRetentionPolicyNone indicates that no backup retention policy is set. +

+ +
+ +

+"retainLatestBackup" +

+
+ +

+BackupPolicyRetentionPolicyRetainLatestBackup indicates that the latest backup is retained. +

+ +
+

+BackupPolicySpec + +

+ +

+ +(Appears on:BackupPolicy) + +

+
+ +

+BackupPolicySpec defines the desired state of BackupPolicy +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupRepoName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of BackupRepo where the backup data will be stored. +If not set, data will be stored in the default backup repository. +

+ +
+ +`pathPrefix`
+ +string + + +
+ +(Optional) + +

+Specifies the directory inside the backup repository to store the backup. +This path is relative to the path of the backup repository. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of retries before marking the backup as failed. +

+ +
+ +`target`
+ + +BackupTarget + + + +
+ + +

+Specifies the target information to back up, such as the target pod, the +cluster connection credential. +

+ +
+ +`targets`
+ + +[]BackupTarget + + + +
+ + +

+Specifies multiple target information for backup operations. This includes details +such as the target pod and cluster connection credentials. All specified targets +will be backed up collectively. +

+ +
+ +`backupMethods`
+ + +[]BackupMethod + + + +
+ + +

+Defines the backup methods. +

+ +
+ +`useKopia`
+ +bool + + +
+ +(Optional) + +

+Specifies whether backup data should be stored in a Kopia repository. +

+ +

+Data within the Kopia repository is both compressed and encrypted. Furthermore, +data deduplication is implemented across various backups of the same cluster. +This approach significantly reduces the actual storage usage, particularly +for clusters with a low update frequency. +

+ +

+NOTE: This feature should NOT be enabled when using KubeBlocks Community Edition, otherwise the backup will not be processed. +

+ +
+ +`encryptionConfig`
+ + +EncryptionConfig + + + +
+ +(Optional) + +

+Specifies the parameters for encrypting backup data. +Encryption will be disabled if the field is not set. +

+ +
+ +`retentionPolicy`
+ + +BackupPolicyRetentionPolicy + + + +
+ +(Optional) + +

+Specifies the backup retention policy. This has a precedence over `backup.spec.retentionPeriod`. +

+ +
+

+BackupPolicyStatus + +

+ +

+ +(Appears on:BackupPolicy) + +

+
+ +

+BackupPolicyStatus defines the observed state of BackupPolicy +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Phase of the BackupPolicy. Valid values are `Available` and `Unavailable`. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+A human-readable message indicating details about why the BackupPolicy +is in this phase. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+ObservedGeneration is the most recent generation observed for this BackupPolicy. +It refers to the BackupPolicy’s generation, which is updated on mutation by the API Server. +

+ +
+

+BackupPolicyTemplate + +

+
+ +

+BackupPolicyTemplate should be provided by addon developers. +It is responsible for generating BackupPolicies for the addon that requires backup operations, +also determining the suitable backup methods and strategies. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ + +

+The metadata for the BackupPolicyTemplate object, including name, namespace, labels, and annotations. +

+Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +BackupPolicyTemplateSpec + + + +
+ + +

+Defines the desired state of the BackupPolicyTemplate. +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`serviceKind`
+ +string + + +
+ +(Optional) + +

+Defines the type of well-known service protocol that the BackupPolicyTemplate provides, and it is optional. +Some examples of well-known service protocols include: +

+
    +
  • +“MySQL”: Indicates that the Component provides a MySQL database service. +
  • +
  • +“PostgreSQL”: Indicates that the Component offers a PostgreSQL database service. +
  • +
  • +“Redis”: Signifies that the Component functions as a Redis key-value store. +
  • +
  • +“ETCD”: Denotes that the Component serves as an ETCD distributed key-value store +
  • +
+ +
+ +`compDefs`
+ +[]string + + +
+ + +

+CompDefs specifies names for the component definitions associated with this BackupPolicyTemplate. +Each name in the list can represent an exact name, a name prefix, or a regular expression pattern. +

+ +

+For example: +

+
    +
  • +“mysql-8.0.30-v1alpha1”: Matches the exact name “mysql-8.0.30-v1alpha1” +
  • +
  • +“mysql-8.0.30”: Matches all names starting with “mysql-8.0.30” +
  • +
  • +”^mysql-8.0.\d{1,2}$“: Matches all names starting with “mysql-8.0.” followed by one or two digits. +
  • +
+ +
+ +`target`
+ + +TargetInstance + + + +
+ +(Optional) + +

+Defines the selection criteria of instance to be backed up, and the connection credential to be used +during the backup process. +

+ +
+ +`schedules`
+ + +[]SchedulePolicy + + + +
+ +(Optional) + +

+Defines the execution plans for backup tasks, specifying when and how backups should occur, +and the retention period of backup files. +

+ +
+ +`backupMethods`
+ + +[]BackupMethodTPL + + + +
+ + +

+Defines an array of BackupMethods to be used. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum number of retry attempts for a backup before it is considered a failure. +

+ +
+ +`retentionPolicy`
+ + +BackupPolicyRetentionPolicy + + + +
+ +(Optional) + +

+Defines the backup retention policy to be used. +

+ +
+ +
+ +`status`
+ + +BackupPolicyTemplateStatus + + + +
+ + +

+Populated by the system, it represents the current information about the BackupPolicyTemplate. +

+ +
+

+BackupPolicyTemplateSpec + +

+ +

+ +(Appears on:BackupPolicyTemplate) + +

+
+ +

+BackupPolicyTemplateSpec contains the settings in a BackupPolicyTemplate. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceKind`
+ +string + + +
+ +(Optional) + +

+Defines the type of well-known service protocol that the BackupPolicyTemplate provides, and it is optional. +Some examples of well-known service protocols include: +

+
    +
  • +“MySQL”: Indicates that the Component provides a MySQL database service. +
  • +
  • +“PostgreSQL”: Indicates that the Component offers a PostgreSQL database service. +
  • +
  • +“Redis”: Signifies that the Component functions as a Redis key-value store. +
  • +
  • +“ETCD”: Denotes that the Component serves as an ETCD distributed key-value store +
  • +
+ +
+ +`compDefs`
+ +[]string + + +
+ + +

+CompDefs specifies names for the component definitions associated with this BackupPolicyTemplate. +Each name in the list can represent an exact name, a name prefix, or a regular expression pattern. +

+ +

+For example: +

+
    +
  • +“mysql-8.0.30-v1alpha1”: Matches the exact name “mysql-8.0.30-v1alpha1” +
  • +
  • +“mysql-8.0.30”: Matches all names starting with “mysql-8.0.30” +
  • +
  • +”^mysql-8.0.\d{1,2}$“: Matches all names starting with “mysql-8.0.” followed by one or two digits. +
  • +
+ +
+ +`target`
+ + +TargetInstance + + + +
+ +(Optional) + +

+Defines the selection criteria of instance to be backed up, and the connection credential to be used +during the backup process. +

+ +
+ +`schedules`
+ + +[]SchedulePolicy + + + +
+ +(Optional) + +

+Defines the execution plans for backup tasks, specifying when and how backups should occur, +and the retention period of backup files. +

+ +
+ +`backupMethods`
+ + +[]BackupMethodTPL + + + +
+ + +

+Defines an array of BackupMethods to be used. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum number of retry attempts for a backup before it is considered a failure. +

+ +
+ +`retentionPolicy`
+ + +BackupPolicyRetentionPolicy + + + +
+ +(Optional) + +

+Defines the backup retention policy to be used. +

+ +
+

+BackupPolicyTemplateStatus + +

+ +

+ +(Appears on:BackupPolicyTemplate) + +

+
+ +

+BackupPolicyTemplateStatus defines the observed state of BackupPolicyTemplate. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the most recent generation observed for this BackupPolicyTemplate. +

+ +
+ +`phase`
+ + +Phase + + + +
+ + +

+Specifies the current phase of the BackupPolicyTemplate. Valid values are `empty`, `Available`, `Unavailable`. +When `Available`, the BackupPolicyTemplate is ready and can be referenced by related objects. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+

+BackupRef + +

+ +

+ +(Appears on:RestoreSpec) + +

+
+ +

+BackupRef describes the backup info. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the backup name. +

+ +
+ +`namespace`
+ +string + + +
+ + +

+Specifies the backup namespace. +

+ +
+ +`sourceTargetName`
+ +string + + +
+ + +

+Specifies the source target for restoration, identified by its name. +

+ +
+

+BackupRepoPhase +(`string` alias) +

+ +

+ +(Appears on:BackupRepoStatus) + +

+
+ +

+BackupRepoPhase denotes different stages for the `BackupRepo`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Deleting" +

+
+ +

+BackupRepoDeleting indicates the backup repository is being deleted. +

+ +
+ +

+"Failed" +

+
+ +

+BackupRepoFailed indicates the pre-check has failed. +

+ +
+ +

+"PreChecking" +

+
+ +

+BackupRepoPreChecking indicates the backup repository is being pre-checked. +

+ +
+ +

+"Ready" +

+
+ +

+BackupRepoReady indicates the backup repository is ready for use. +

+ +
+

+BackupRepoSpec + +

+ +

+ +(Appears on:BackupRepo) + +

+
+ +

+BackupRepoSpec defines the desired state of `BackupRepo`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`storageProviderRef`
+ +string + + +
+ + +

+Specifies the name of the `StorageProvider` used by this backup repository. +

+ +
+ +`accessMethod`
+ + +AccessMethod + + + +
+ +(Optional) + +

+Specifies the access method of the backup repository. +

+ +
+ +`volumeCapacity`
+ + +Kubernetes resource.Quantity + + + +
+ +(Optional) + +

+Specifies the capacity of the PVC created by this backup repository. +

+ +
+ +`pvReclaimPolicy`
+ + +Kubernetes core/v1.PersistentVolumeReclaimPolicy + + + +
+ + +

+Specifies reclaim policy of the PV created by this backup repository. +

+ +
+ +`config`
+ +map[string]string + + +
+ +(Optional) + +

+Stores the non-secret configuration parameters for the `StorageProvider`. +

+ +
+ +`credential`
+ + +Kubernetes core/v1.SecretReference + + + +
+ +(Optional) + +

+References to the secret that holds the credentials for the `StorageProvider`. +

+ +
+ +`pathPrefix`
+ +string + + +
+ +(Optional) + +

+Specifies the prefix of the path for storing backup data. +

+ +
+

+BackupRepoStatus + +

+ +

+ +(Appears on:BackupRepo) + +

+
+ +

+BackupRepoStatus defines the observed state of `BackupRepo`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +BackupRepoPhase + + + +
+ +(Optional) + +

+Represents the current phase of reconciliation for the backup repository. +Permissible values are PreChecking, Failed, Ready, Deleting. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Provides a detailed description of the current state of the backup repository. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the latest generation of the resource that the controller has observed. +

+ +
+ +`generatedCSIDriverSecret`
+ + +Kubernetes core/v1.SecretReference + + + +
+ +(Optional) + +

+Refers to the generated secret for the `StorageProvider`. +

+ +
+ +`generatedStorageClassName`
+ +string + + +
+ +(Optional) + +

+Represents the name of the generated storage class. +

+ +
+ +`backupPVCName`
+ +string + + +
+ +(Optional) + +

+Represents the name of the PVC that stores backup data. +

+ +
+ +`toolConfigSecretName`
+ +string + + +
+ +(Optional) + +

+Represents the name of the secret that contains the configuration for the tool. +

+ +
+ +`isDefault`
+ +bool + + +
+ +(Optional) + +

+Indicates if this backup repository is the default one. +

+ +
+

+BackupSchedulePhase +(`string` alias) +

+ +

+ +(Appears on:BackupScheduleStatus) + +

+
+ +

+BackupSchedulePhase defines the phase of BackupSchedule +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +

+BackupSchedulePhaseAvailable indicates the backup schedule is available. +

+ +
+ +

+"Failed" +

+
+ +

+BackupSchedulePhaseFailed indicates the backup schedule has failed. +

+ +
+

+BackupScheduleSpec + +

+ +

+ +(Appears on:BackupSchedule) + +

+
+ +

+BackupScheduleSpec defines the desired state of BackupSchedule. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupPolicyName`
+ +string + + +
+ + +

+Specifies the backupPolicy to be applied for the `schedules`. +

+ +
+ +`startingDeadlineMinutes`
+ +int64 + + +
+ +(Optional) + +

+Defines the deadline in minutes for starting the backup workload if it +misses its scheduled time for any reason. +

+ +
+ +`schedules`
+ + +[]SchedulePolicy + + + +
+ + +

+Defines the list of backup schedules. +

+ +
+

+BackupScheduleStatus + +

+ +

+ +(Appears on:BackupSchedule) + +

+
+ +

+BackupScheduleStatus defines the observed state of BackupSchedule. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +BackupSchedulePhase + + + +
+ +(Optional) + +

+Describes the phase of the BackupSchedule. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the most recent generation observed for this BackupSchedule. +It refers to the BackupSchedule’s generation, which is updated on mutation +by the API Server. +

+ +
+ +`failureReason`
+ +string + + +
+ +(Optional) + +

+Represents an error that caused the backup to fail. +

+ +
+ +`schedules`
+ + +map[string]github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1.ScheduleStatus + + + +
+ +(Optional) + +

+Describes the status of each schedule. +

+ +
+

+BackupSpec + +

+ +

+ +(Appears on:Backup) + +

+
+ +

+BackupSpec defines the desired state of Backup. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupPolicyName`
+ +string + + +
+ + +

+Specifies the backup policy to be applied for this backup. +

+ +
+ +`backupMethod`
+ +string + + +
+ + +

+Specifies the backup method name that is defined in the backup policy. +

+ +
+ +`deletionPolicy`
+ + +BackupDeletionPolicy + + + +
+ + +

+Determines whether the backup contents stored in the backup repository +should be deleted when the backup custom resource(CR) is deleted. +Supported values are `Retain` and `Delete`. +

+
    +
  • +`Retain` means that the backup content and its physical snapshot on backup repository are kept. +
  • +
  • +`Delete` means that the backup content and its physical snapshot on backup repository are deleted. +
  • +
+ +

+Deleting only the backup CR while retaining the backup contents in the backup repository is not yet supported. +The current implementation only prevents accidental deletion of backup data. +

+ +
+ +`retentionPeriod`
+ + +RetentionPeriod + + + +
+ +(Optional) + +

+Determines a duration up to which the backup should be kept. +Controller will remove all backups that are older than the RetentionPeriod. +If not set, the backup will be kept forever. +For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. +Sample duration format: +

+
    +
  • +years: 2y +
  • +
  • +months: 6mo +
  • +
  • +days: 30d +
  • +
  • +hours: 12h +
  • +
  • +minutes: 30m +
  • +
+ +

+You can also combine the above durations. For example: 30d12h30m. +

+ +
+ +`parentBackupName`
+ +string + + +
+ +(Optional) + +

+Determines the parent backup name for incremental or differential backup. +

+ +
+ +`parameters`
+ + +[]ParameterPair + + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+

+BackupStatus + +

+ +

+ +(Appears on:Backup) + +

+
+ +

+BackupStatus defines the observed state of Backup. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`formatVersion`
+ +string + + +
+ +(Optional) + +

+Specifies the backup format version, which includes major, minor, and patch versions. +

+ +
+ +`phase`
+ + +BackupPhase + + + +
+ +(Optional) + +

+Indicates the current state of the backup operation. +

+ +
+ +`expiration`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Indicates when this backup becomes eligible for garbage collection. +A ‘null’ value implies that the backup will not be cleaned up unless manually deleted. +

+ +
+ +`startTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time when the backup operation was started. +The server’s time is used for this timestamp. +

+ +
+ +`completionTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time when the backup operation was completed. +This timestamp is recorded even if the backup operation fails. +The server’s time is used for this timestamp. +

+ +
+ +`duration`
+ + +Kubernetes meta/v1.Duration + + + +
+ +(Optional) + +

+Records the duration of the backup operation. +When converted to a string, the format is “1h2m0.5s”. +

+ +
+ +`totalSize`
+ +string + + +
+ +(Optional) + +

+Records the total size of the data backed up. +The size is represented as a string with capacity units in the format of “1Gi”, “1Mi”, “1Ki”. +If no capacity unit is specified, it is assumed to be in bytes. +

+ +
+ +`failureReason`
+ +string + + +
+ +(Optional) + +

+Any error that caused the backup operation to fail. +

+ +
+ +`backupRepoName`
+ +string + + +
+ +(Optional) + +

+The name of the backup repository. +

+ +
+ +`path`
+ +string + + +
+ +(Optional) + +

+The directory within the backup repository where the backup data is stored. +This is an absolute path within the backup repository. +

+ +
+ +`kopiaRepoPath`
+ +string + + +
+ +(Optional) + +

+Records the path of the Kopia repository. +

+ +
+ +`persistentVolumeClaimName`
+ +string + + +
+ +(Optional) + +

+Records the name of the persistent volume claim used to store the backup data. +

+ +
+ +`timeRange`
+ + +BackupTimeRange + + + +
+ +(Optional) + +

+Records the time range of the data backed up. For Point-in-Time Recovery (PITR), +this is the time range of recoverable data. +

+ +
+ +`target`
+ + +BackupStatusTarget + + + +
+ +(Optional) + +

+Records the target information for this backup. +

+ +
+ +`targets`
+ + +[]BackupStatusTarget + + + +
+ +(Optional) + +

+Records the targets information for this backup. +

+ +
+ +`backupMethod`
+ + +BackupMethod + + + +
+ +(Optional) + +

+Records the backup method information for this backup. +Refer to BackupMethod for more details. +

+ +
+ +`encryptionConfig`
+ + +EncryptionConfig + + + +
+ +(Optional) + +

+Records the encryption config for this backup. +

+ +
+ +`actions`
+ + +[]ActionStatus + + + +
+ +(Optional) + +

+Records the actions status for this backup. +

+ +
+ +`volumeSnapshots`
+ + +[]VolumeSnapshotStatus + + + +
+ +(Optional) + +

+Records the volume snapshot status for the action. +

+ +
+ +`parentBackupName`
+ +string + + +
+ +(Optional) + +

+Records the parent backup name for incremental or differential backup. +When the parent backup is deleted, the backup will also be deleted. +

+ +
+ +`baseBackupName`
+ +string + + +
+ +(Optional) + +

+Records the base full backup name for incremental backup or differential backup. +When the base backup is deleted, the backup will also be deleted. +

+ +
+ +`extras`
+ +[]string + + +
+ +(Optional) + +

+Records any additional information for the backup. +

+ +
+

+BackupStatusTarget + +

+ +

+ +(Appears on:BackupStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`BackupTarget`
+ + +BackupTarget + + + +
+ + +

+ +(Members of `BackupTarget` are embedded into this type.) + +

+ +
+ +`selectedTargetPods`
+ +[]string + + +
+ + +

+Records the selected pods by the target info during backup. +

+ +
+

+BackupTarget + +

+ +

+ +(Appears on:BackupMethod, BackupPolicySpec, BackupStatusTarget) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies a mandatory and unique identifier for each target when using the “targets” field. +The backup data for the current target is stored in a uniquely named subdirectory. +

+ +
+ +`podSelector`
+ + +PodSelector + + + +
+ + +

+Used to find the target pod. The volumes of the target pod will be backed up. +

+ +
+ +`connectionCredential`
+ + +ConnectionCredential + + + +
+ +(Optional) + +

+Specifies the connection credential to connect to the target database cluster. +

+ +
+ +`resources`
+ + +KubeResources + + + +
+ +(Optional) + +

+Specifies the kubernetes resources to back up. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ + +

+Specifies the service account to run the backup workload. +

+ +
+ +`containerPort`
+ + +ContainerPort + + + +
+ +(Optional) + +

+Specifies the container port in the target pod. +If not specified, the first container and its first port will be used. +

+ +
+

+BackupTimeRange + +

+ +

+ +(Appears on:ActionStatus, BackupStatus) + +

+
+ +

+BackupTimeRange records the time range of backed up data, for PITR, this is the +time range of recoverable data. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`timeZone`
+ +string + + +
+ +(Optional) + +

+Specifies the time zone; only zone offsets are supported, with a value range of “-12:59 ~ +13:00”. +

+ +
+ +`start`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the start time of the backup, in Coordinated Universal Time (UTC). +

+ +
+ +`end`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the end time of the backup, in Coordinated Universal Time (UTC). +

+ +
+

+BackupType +(`string` alias) +

+ +

+ +(Appears on:ActionSetSpec) + +

+
+ +

+BackupType the backup type. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Continuous" +

+
+ +
+ +

+"Differential" +

+
+ +
+ +

+"Full" +

+
+ +
+ +

+"Incremental" +

+
+ +
+ +

+"Selective" +

+
+ +
+

+BaseJobActionSpec + +

+ +

+ +(Appears on:BackupActionSpec, JobActionSpec) + +

+
+ +

+BaseJobActionSpec is an action that creates a Kubernetes Job to execute a command. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`image`
+ +string + + +
+ + +

+Specifies the image of the backup container. +

+ +
+ +`command`
+ +[]string + + +
+ + +

+Defines the commands to back up the volume data. +

+ +
+

+ConnectionCredential + +

+ +

+ +(Appears on:BackupTarget, ReadyConfig) + +

+
+ +

+ConnectionCredential specifies the connection credential to connect to the +target database cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`secretName`
+ +string + + +
+ + +

+Refers to the Secret object that contains the connection credential. +

+ +
+ +`usernameKey`
+ +string + + +
+ + +

+Specifies the map key of the user in the connection credential secret. +

+ +
+ +`passwordKey`
+ +string + + +
+ + +

+Specifies the map key of the password in the connection credential secret. +This password will be saved in the backup annotation for full backup. +You can use the environment variable DP_ENCRYPTION_KEY to specify encryption key. +

+ +
+ +`hostKey`
+ +string + + +
+ +(Optional) + +

+Specifies the map key of the host in the connection credential secret. +

+ +
+ +`portKey`
+ +string + + +
+ +(Optional) + +

+Specifies the map key of the port in the connection credential secret. +

+ +
+

+ContainerPort + +

+ +

+ +(Appears on:BackupTarget, TargetInstance) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`containerName`
+ +string + + +
+ + +

+Specifies the name of container with the port. +

+ +
+ +`portName`
+ +string + + +
+ + +

+Specifies the port name. +

+ +
+

+DataRestorePolicy +(`string` alias) +

+ +

+ +(Appears on:RequiredPolicyForAllPodSelection) + +

+
+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"OneToMany" +

+
+ +
+ +

+"OneToOne" +

+
+ +
+

+EncryptionConfig + +

+ +

+ +(Appears on:BackupPolicySpec, BackupStatus) + +

+
+ +

+EncryptionConfig defines the parameters for encrypting backup data. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`algorithm`
+ +string + + +
+ + +

+Specifies the encryption algorithm. Currently supported algorithms are: +

+
    +
  • +AES-128-CFB +
  • +
  • +AES-192-CFB +
  • +
  • +AES-256-CFB +
  • +
+ +
+ +`passPhraseSecretKeyRef`
+ + +Kubernetes core/v1.SecretKeySelector + + + +
+ + +

+Selects the key of a secret in the current namespace, the value of the secret +is used as the encryption key. +

+ +
+

+EnvVar + +

+ +

+ +(Appears on:BackupMethodTPL) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the environment variable key. +

+ +
+ +`value`
+ +string + + +
+ +(Optional) + +

+Specifies the environment variable value. +

+ +
+ +`valueFrom`
+ + +ValueFrom + + + +
+ +(Optional) + +

+Specifies the source used to determine the value of the environment variable. +Cannot be used if value is not empty. +

+ +
+

+ExecAction + +

+ +

+ +(Appears on:ReadyConfig) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`target`
+ + +ExecActionTarget + + + +
+ +(Optional) + +

+Defines the pods that need to be executed for the exec action. +Execution will occur on all pods that meet the conditions. +

+ +
+

+ExecActionSpec + +

+ +

+ +(Appears on:ActionSpec) + +

+
+ +

+ExecActionSpec is an action that uses the pod exec API to execute a command in a container +in a pod. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`container`
+ +string + + +
+ +(Optional) + +

+Specifies the container within the pod where the command should be executed. +If not specified, the first container in the pod is used by default. +

+ +
+ +`command`
+ +[]string + + +
+ + +

+Defines the command and arguments to be executed. +

+ +
+ +`onError`
+ + +ActionErrorMode + + + +
+ +(Optional) + +

+Indicates how to behave if an error is encountered during the execution of this action. +

+ +
+ +`timeout`
+ + +Kubernetes meta/v1.Duration + + + +
+ +(Optional) + +

+Specifies the maximum duration to wait for the hook to complete before +considering the execution a failure. +

+ +
+

+ExecActionTarget + +

+ +

+ +(Appears on:ExecAction) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podSelector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+Executes kubectl in all selected pods. +

+ +
+

+IncludeResource + +

+ +

+ +(Appears on:RestoreKubeResources) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`groupResource`
+ +string + + +
+ + +
+ +`labelSelector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ +(Optional) + +

+Selects the specified resource for recovery by label. +

+ +
+

+JobAction + +

+ +

+ +(Appears on:ReadyConfig) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`requiredPolicyForAllPodSelection`
+ + +RequiredPolicyForAllPodSelection + + + +
+ + +

+Specifies the restore policy, which is required when the pod selection strategy for the source target is ‘All’. +This field is ignored if the pod selection strategy is ‘Any’. +

+ +
+ +`target`
+ + +JobActionTarget + + + +
+ + +

+Defines the pods that needs to be executed for the job action. +

+ +
+

+JobActionSpec + +

+ +

+ +(Appears on:ActionSpec, BackupDataActionSpec, RestoreActionSpec) + +

+
+ +

+JobActionSpec is an action that creates a Kubernetes Job to execute a command. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`BaseJobActionSpec`
+ + +BaseJobActionSpec + + + +
+ + +

+ +(Members of `BaseJobActionSpec` are embedded into this type.) + +

+ +
+ +`runOnTargetPodNode`
+ +bool + + +
+ +(Optional) + +

+Determines whether to run the job workload on the target pod node. +If the backup container needs to mount the target pod’s volumes, this field +should be set to true. Otherwise, the target pod’s volumes will be ignored. +

+ +
+ +`onError`
+ + +ActionErrorMode + + + +
+ +(Optional) + +

+Indicates how to behave if an error is encountered during the execution of this action. +

+ +
+

+JobActionTarget + +

+ +

+ +(Appears on:JobAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podSelector`
+ + +PodSelector + + + +
+ + +

+Selects one of the pods, identified by labels, to build the job spec. +This includes mounting required volumes and injecting built-in environment variables of the selected pod. +

+ +
+ +`volumeMounts`
+ + +[]Kubernetes core/v1.VolumeMount + + + +
+ +(Optional) + +

+Defines which volumes of the selected pod need to be mounted on the restoring pod. +

+ +
+

+KubeResources + +

+ +

+ +(Appears on:BackupTarget) + +

+
+ +

+KubeResources defines the kubernetes resources to back up. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+A metav1.LabelSelector to filter the target kubernetes resources that need +to be backed up. If not set, no kubernetes resources will be backed up. +

+ +
+ +`included`
+ +[]string + + +
+ +(Optional) + +

+included is a slice of namespace-scoped resource type names to include in +the kubernetes resources. +The default value is empty. +

+ +
+ +`excluded`
+ +[]string + + +
+ +(Optional) + +

+excluded is a slice of namespace-scoped resource type names to exclude from +the kubernetes resources. +The default value is empty. +

+ +
+

+ParameterPair + +

+ +

+ +(Appears on:BackupSpec, RestoreSpec, SchedulePolicy) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Represents the name of the parameter. +

+ +
+ +`value`
+ +string + + +
+ + +

+Represents the parameter values. +

+ +
+

+ParametersSchema + +

+ +

+ +(Appears on:StorageProviderSpec) + +

+
+ +

+ParametersSchema describes the parameters needed for a certain storage. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`openAPIV3Schema`
+ + +Kubernetes api extensions v1.JSONSchemaProps + + + +
+ +(Optional) + +

+Defines the parameters in OpenAPI V3. +

+ +
+ +`credentialFields`
+ +[]string + + +
+ +(Optional) + +

+Defines which parameters are credential fields, which need to be handled specifically. +For instance, these should be stored in a `Secret` instead of a `ConfigMap`. +

+ +
+

+Phase +(`string` alias) +

+ +

+ +(Appears on:ActionSetStatus, BackupPolicyStatus, BackupPolicyTemplateStatus) + +

+
+ +

+Phase defines the BackupPolicy and ActionSet CR .status.phase +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +
+ +

+"Unavailable" +

+
+ +
+

+PodSelectionStrategy +(`string` alias) +

+ +

+ +(Appears on:PodSelector, TargetInstance) + +

+
+ +

+PodSelectionStrategy specifies the strategy to select when multiple pods are +selected for backup target +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"All" +

+
+ +

+PodSelectionStrategyAll selects all pods that match the labelsSelector. +

+ +
+ +

+"Any" +

+
+ +

+PodSelectionStrategyAny selects any one pod that match the labelsSelector. +

+ +
+

+PodSelector + +

+ +

+ +(Appears on:BackupTarget, JobActionTarget) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`LabelSelector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+ +(Members of `LabelSelector` are embedded into this type.) + +

+ +

+labelsSelector is the label selector to filter the target pods. +

+ +
+ +`fallbackLabelSelector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ +(Optional) + +

+fallbackLabelSelector is used to filter available pods when the labelSelector fails. +This only takes effect when the `strategy` field below is set to `Any`. +

+ +
+ +`strategy`
+ + +PodSelectionStrategy + + + +
+ + +

+Specifies the strategy to select the target pod when multiple pods are selected. +Valid values are: +

+
    +
  • +`Any`: select any one pod that match the labelsSelector. +
  • +
  • +`All`: select all pods that match the labelsSelector. The backup data for the current pod +will be stored in a subdirectory named after the pod. +
  • +
+ +
+ +`useParentSelectedPods`
+ +bool + + +
+ +(Optional) + +

+UseParentSelectedPods indicates whether to use the pods selected by the parent for backup. +If set to true, the backup will use the same pods selected by the parent. +And only takes effect when the ‘strategy’ is set to ‘Any’. +

+ +
+

+PrepareDataConfig + +

+ +

+ +(Appears on:RestoreSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`requiredPolicyForAllPodSelection`
+ + +RequiredPolicyForAllPodSelection + + + +
+ + +

+Specifies the restore policy, which is required when the pod selection strategy for the source target is ‘All’. +This field is ignored if the pod selection strategy is ‘Any’. +optional +

+ +
+ +`dataSourceRef`
+ + +VolumeConfig + + + +
+ +(Optional) + +

+Specifies the configuration when using `persistentVolumeClaim.spec.dataSourceRef` method for restoring. +Describes the source volume of the backup targetVolumes and the mount path in the restoring container. +

+ +
+ +`volumeClaims`
+ + +[]RestoreVolumeClaim + + + +
+ +(Optional) + +

+Defines the persistent Volume claims that need to be restored and mounted together into the restore job. +These persistent Volume claims will be created if they do not exist. +

+ +
+ +`volumeClaimsTemplate`
+ + +RestoreVolumeClaimsTemplate + + + +
+ +(Optional) + +

+Defines a template to build persistent Volume claims that need to be restored. +These claims will be created in an orderly manner based on the number of replicas or reused if they already exist. +

+ +
+ +`volumeClaimRestorePolicy`
+ + +VolumeClaimRestorePolicy + + + +
+ + +

+Defines restore policy for persistent volume claim. +Supported policies are as follows: +

+
    +
  • +`Parallel`: parallel recovery of persistent volume claim. +
  • +
  • +`Serial`: restore the persistent volume claim in sequence, and wait until the previous persistent volume claim is restored before restoring a new one. +
  • +
+ +
+ +`schedulingSpec`
+ + +SchedulingSpec + + + +
+ +(Optional) + +

+Specifies the scheduling spec for the restoring pod. +

+ +
+

+ReadinessProbe + +

+ +

+ +(Appears on:ReadyConfig) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`initialDelaySeconds`
+ +int + + +
+ +(Optional) + +

+Specifies the number of seconds after the container has started before the probe is initiated. +

+ +
+ +`timeoutSeconds`
+ +int + + +
+ +(Optional) + +

+Specifies the number of seconds after which the probe times out. +The default value is 30 seconds, and the minimum value is 1. +

+ +
+ +`periodSeconds`
+ +int + + +
+ +(Optional) + +

+Specifies how often (in seconds) to perform the probe. +The default value is 5 seconds, and the minimum value is 1. +

+ +
+ +`exec`
+ + +ReadinessProbeExecAction + + + +
+ + +

+Specifies the action to take. +

+ +
+

+ReadinessProbeExecAction + +

+ +

+ +(Appears on:ReadinessProbe) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`image`
+ +string + + +
+ + +

+Refers to the container image. +

+ +
+ +`command`
+ +[]string + + +
+ + +

+Refers to the container command. +

+ +
+

+ReadyConfig + +

+ +

+ +(Appears on:RestoreSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`jobAction`
+ + +JobAction + + + +
+ +(Optional) + +

+Specifies the configuration for a job action. +

+ +
+ +`execAction`
+ + +ExecAction + + + +
+ +(Optional) + +

+Specifies the configuration for an exec action. +

+ +
+ +`connectionCredential`
+ + +ConnectionCredential + + + +
+ +(Optional) + +

+Defines the credential template used to create a connection credential. +

+ +
+ +`readinessProbe`
+ + +ReadinessProbe + + + +
+ +(Optional) + +

+Defines a periodic probe of the service readiness. +The controller will perform postReadyHooks of BackupScript.spec.restore +after the service readiness when readinessProbe is configured. +

+ +
+

+RequiredPolicyForAllPodSelection + +

+ +

+ +(Appears on:JobAction, PrepareDataConfig) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`dataRestorePolicy`
+ + +DataRestorePolicy + + + +
+ + +

+Specifies the data restore policy. Options include: +- OneToMany: Enables restoration of all volumes from a single data copy of the original target instance. +The ‘sourceOfOneToMany’ field must be set when using this policy. +- OneToOne: Restricts data restoration such that each data piece can only be restored to a single target instance. +This is the default policy. When the number of target instances specified for restoration surpasses the count of original backup target instances. +

+ +
+ +`sourceOfOneToMany`
+ + +SourceOfOneToMany + + + +
+ + +

+Specifies the name of the source target pod. This field is mandatory when the DataRestorePolicy is configured to ‘OneToMany’. +

+ +
+

+RestoreActionSpec + +

+ +

+ +(Appears on:ActionSetSpec) + +

+
+ +

+RestoreActionSpec defines how to restore data. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`prepareData`
+ + +JobActionSpec + + + +
+ +(Optional) + +

+Specifies the action required to prepare data for restoration. +

+ +
+ +`postReady`
+ + +[]ActionSpec + + + +
+ +(Optional) + +

+Specifies the actions that should be executed after the data has been prepared and is ready for restoration. +

+ +
+ +`baseBackupRequired`
+ +bool + + +
+ +(Optional) + +

+Determines if a base backup is required during restoration. +

+ +
+ +`withParameters`
+ +[]string + + +
+ +(Optional) + +

+Specifies the parameters used by the restore action +

+ +
+

+RestoreActionStatus +(`string` alias) +

+ +

+ +(Appears on:RestoreStatusAction) + +

+
+ +

+RestoreActionStatus the status of restore action. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Completed" +

+
+ +
+ +

+"Failed" +

+
+ +
+ +

+"Processing" +

+
+ +
+

+RestoreKubeResources + +

+ +

+ +(Appears on:RestoreSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`included`
+ + +[]IncludeResource + + + +
+ +(Optional) + +

+Restores the specified resources. +

+ +
+

+RestorePhase +(`string` alias) +

+ +

+ +(Appears on:RestoreStatus) + +

+
+ +

+RestorePhase The current phase. Valid values are Running, Completed, Failed, AsDataSource. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"AsDataSource" +

+
+ +
+ +

+"Completed" +

+
+ +
+ +

+"Failed" +

+
+ +
+ +

+"Running" +

+
+ +
+

+RestoreSpec + +

+ +

+ +(Appears on:Restore) + +

+
+ +

+RestoreSpec defines the desired state of Restore +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backup`
+ + +BackupRef + + + +
+ + +

+Specifies the backup to be restored. The restore behavior is based on the backup type: +

+
    +
  1. +Full: will be restored the full backup directly. +
  2. +
  3. +Incremental: will be restored sequentially from the most recent full backup of this incremental backup. +
  4. +
  5. +Differential: will be restored sequentially from the parent backup of the differential backup. +
  6. +
  7. +Continuous: will find the most recent full backup at this time point and the continuous backups after it to restore. +
  8. +
+ +
+ +`restoreTime`
+ +string + + +
+ +(Optional) + +

+Specifies the point in time for restoring. +

+ +
+ +`resources`
+ + +RestoreKubeResources + + + +
+ +(Optional) + +

+Restores the specified resources of Kubernetes. +

+ +
+ +`prepareDataConfig`
+ + +PrepareDataConfig + + + +
+ +(Optional) + +

+Configuration for the action of “prepareData” phase, including the persistent volume claims +that need to be restored and scheduling strategy of temporary recovery pod. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the service account name needed for recovery pod. +

+ +
+ +`readyConfig`
+ + +ReadyConfig + + + +
+ +(Optional) + +

+Configuration for the action of “postReady” phase. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to set in the container for restore. These will be +merged with the env of Backup and ActionSet. +

+ +

+The priority of merging is as follows: `Restore env > Backup env > ActionSet env`. +

+ +
+ +`containerResources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the required resources of restore job’s container. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of retries before marking the restore failed. +

+ +
+ +`parameters`
+ + +[]ParameterPair + + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+

+RestoreStage +(`string` alias) +

+
+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"postReady" +

+
+ +
+ +

+"prepareData" +

+
+ +
+

+RestoreStatus + +

+ +

+ +(Appears on:Restore) + +

+
+ +

+RestoreStatus defines the observed state of Restore +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +RestorePhase + + + +
+ +(Optional) + +

+Represents the current phase of the restore. +

+ +
+ +`startTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the date/time when the restore started being processed. +

+ +
+ +`completionTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the date/time when the restore finished being processed. +

+ +
+ +`duration`
+ + +Kubernetes meta/v1.Duration + + + +
+ +(Optional) + +

+Records the duration of the restore execution. +When converted to a string, the form is “1h2m0.5s”. +

+ +
+ +`actions`
+ + +RestoreStatusActions + + + +
+ +(Optional) + +

+Records all restore actions performed. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Describes the current state of the restore API Resource, like warning. +

+ +
+

+RestoreStatusAction + +

+ +

+ +(Appears on:RestoreStatusActions) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Describes the name of the restore action based on the current backup. +

+ +
+ +`backupName`
+ +string + + +
+ + +

+Describes which backup’s restore action belongs to. +

+ +
+ +`objectKey`
+ +string + + +
+ + +

+Describes the execution object of the restore action. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a human-readable message indicating details about the object condition. +

+ +
+ +`status`
+ + +RestoreActionStatus + + + +
+ + +

+The status of this action. +

+ +
+ +`startTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+The start time of the restore job. +

+ +
+ +`endTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+The completion time of the restore job. +

+ +
+

+RestoreStatusActions + +

+ +

+ +(Appears on:RestoreStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`prepareData`
+ + +[]RestoreStatusAction + + + +
+ +(Optional) + +

+Records the actions for the prepareData phase. +

+ +
+ +`postReady`
+ + +[]RestoreStatusAction + + + +
+ +(Optional) + +

+Records the actions for the postReady phase. +

+ +
+

+RestoreVolumeClaim + +

+ +

+ +(Appears on:PrepareDataConfig, RestoreVolumeClaimsTemplate) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ + +

+Specifies the standard metadata for the object. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +

+Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`volumeClaimSpec`
+ + +Kubernetes core/v1.PersistentVolumeClaimSpec + + + +
+ + +

+Defines the desired characteristics of a persistent volume claim. +

+ +
+ +`VolumeConfig`
+ + +VolumeConfig + + + +
+ + +

+ +(Members of `VolumeConfig` are embedded into this type.) + +

+ +

+Describes the source volume of the backup target volumes and the mount path in the restoring container. +At least one must exist for volumeSource and mountPath. +

+ +
+

+RestoreVolumeClaimsTemplate + +

+ +

+ +(Appears on:PrepareDataConfig) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`templates`
+ + +[]RestoreVolumeClaim + + + +
+ + +

+Contains a list of volume claims. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the replicas of persistent volume claim that need to be created and restored. +The format of the created claim name is `$(template-name)-$(index)`. +

+ +
+ +`startingIndex`
+ +int32 + + +
+ + +

+Specifies the starting index for the created persistent volume claim according to the template. +The minimum value is 0. +

+ +
+

+RetentionPeriod +(`string` alias) +

+ +

+ +(Appears on:BackupSpec, SchedulePolicy) + +

+
+ +

+RetentionPeriod represents a duration in the format “1y2mo3w4d5h6m”, where +y=year, mo=month, w=week, d=day, h=hour, m=minute. +

+
+

+RuntimeSettings + +

+ +

+ +(Appears on:BackupMethod, BackupMethodTPL) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resource required by container. +More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +

+ +
+

+SchedulePhase +(`string` alias) +

+ +

+ +(Appears on:ScheduleStatus) + +

+
+ +

+SchedulePhase represents the phase of a schedule. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Failed" +

+
+ +
+ +

+"Running" +

+
+ +
+

+SchedulePolicy + +

+ +

+ +(Appears on:BackupPolicyTemplateSpec, BackupScheduleSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the backup schedule is enabled or not. +

+ +
+ +`name`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the schedule. Names cannot be duplicated. +If the name is empty, it will be considered the same as the value of the backupMethod below. +

+ +
+ +`backupMethod`
+ +string + + +
+ + +

+Specifies the backup method name that is defined in backupPolicy. +

+ +
+ +`cronExpression`
+ +string + + +
+ + +

+Specifies the cron expression for the schedule. The timezone is in UTC. +see https://en.wikipedia.org/wiki/Cron. +

+ +
+ +`retentionPeriod`
+ + +RetentionPeriod + + + +
+ +(Optional) + +

+Determines the duration for which the backup should be kept. +KubeBlocks will remove all backups that are older than the RetentionPeriod. +For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. +Sample duration format: +

+
    +
  • +years: 2y +
  • +
  • +months: 6mo +
  • +
  • +days: 30d +
  • +
  • +hours: 12h +
  • +
  • +minutes: 30m +
  • +
+ +

+You can also combine the above durations. For example: 30d12h30m +

+ +
+ +`parameters`
+ + +[]ParameterPair + + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+

+ScheduleStatus + +

+ +

+ +(Appears on:BackupScheduleStatus) + +

+
+ +

+ScheduleStatus represents the status of each schedule. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +SchedulePhase + + + +
+ +(Optional) + +

+Describes the phase of the schedule. +

+ +
+ +`failureReason`
+ +string + + +
+ +(Optional) + +

+Represents an error that caused the backup to fail. +

+ +
+ +`lastScheduleTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the last time the backup was scheduled. +

+ +
+ +`lastSuccessfulTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the last time the backup was successfully completed. +

+ +
+

+SchedulingSpec + +

+ +

+ +(Appears on:PrepareDataConfig) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Specifies the tolerations for the restoring pod. +

+ +
+ +`nodeSelector`
+ +map[string]string + + +
+ +(Optional) + +

+Defines a selector which must be true for the pod to fit on a node. +The selector must match a node’s labels for the pod to be scheduled on that node. +More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +

+ +
+ +`nodeName`
+ +string + + +
+ +(Optional) + +

+Specifies a request to schedule this pod onto a specific node. If it is non-empty, +the scheduler simply schedules this pod onto that node, assuming that it fits resource +requirements. +

+ +
+ +`affinity`
+ + +Kubernetes core/v1.Affinity + + + +
+ +(Optional) + +

+Contains a group of affinity scheduling rules. +Refer to https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +

+ +
+ +`topologySpreadConstraints`
+ + +[]Kubernetes core/v1.TopologySpreadConstraint + + + +
+ +(Optional) + +

+Describes how a group of pods ought to spread across topology +domains. The scheduler will schedule pods in a way which abides by the constraints. +Refer to https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ +

+ +
+ +`schedulerName`
+ +string + + +
+ +(Optional) + +

+Specifies the scheduler to dispatch the pod. +If not specified, the pod will be dispatched by the default scheduler. +

+ +
+

+SourceOfOneToMany + +

+ +

+ +(Appears on:RequiredPolicyForAllPodSelection) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`targetPodName`
+ +string + + +
+ + +

+Specifies the name of the source target pod. +

+ +
+

+StorageProviderPhase +(`string` alias) +

+ +

+ +(Appears on:StorageProviderStatus) + +

+
+ +

+StorageProviderPhase defines phases of a `StorageProvider`. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"NotReady" +

+
+ +

+StorageProviderNotReady indicates that the `StorageProvider` is not ready, +usually because the specified CSI driver is not yet installed. +

+ +
+ +

+"Ready" +

+
+ +

+StorageProviderReady indicates that the `StorageProvider` is ready for use. +

+ +
+

+StorageProviderSpec + +

+ +

+ +(Appears on:StorageProvider) + +

+
+ +

+StorageProviderSpec defines the desired state of `StorageProvider`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`csiDriverName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the CSI driver used to access remote storage. +This field can be empty, it indicates that the storage is not accessible via CSI. +

+ +
+ +`csiDriverSecretTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template that used to render and generate `k8s.io/api/core/v1.Secret` +resources for a specific CSI driver. +For example, `accessKey` and `secretKey` needed by CSI-S3 are stored in this +`Secret` resource. +

+ +
+ +`storageClassTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template utilized to render and generate `kubernetes.storage.k8s.io.v1.StorageClass` +resources. The `StorageClass’ created by this template is aimed at using the CSI driver. +

+ +
+ +`persistentVolumeClaimTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template that renders and generates `k8s.io/api/core/v1.PersistentVolumeClaim` +resources. This PVC can reference the `StorageClass` created from `storageClassTemplate`, +allowing Pods to access remote storage by mounting the PVC. +

+ +
+ +`datasafedConfigTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template used to render and generate `k8s.io/api/core/v1.Secret`. +This `Secret` involves the configuration details required by the `datasafed` tool +to access remote storage. For example, the `Secret` should contain `endpoint`, +`bucket`, ‘region’, ‘accessKey’, ‘secretKey’, or something else for S3 storage. +This field can be empty, it means this kind of storage is not accessible via +the `datasafed` tool. +

+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Describes the parameters required for storage. +The parameters defined here can be referenced in the above templates, +and `kbcli` uses this definition for dynamic command-line parameter parsing. +

+ +
+

+StorageProviderStatus + +

+ +

+ +(Appears on:StorageProvider) + +

+
+ +

+StorageProviderStatus defines the observed state of `StorageProvider`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +StorageProviderPhase + + + +
+ + +

+The phase of the `StorageProvider`. Valid phases are `NotReady` and `Ready`. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Describes the current state of the `StorageProvider`. +

+ +
+

+SyncProgress + +

+ +

+ +(Appears on:BackupDataActionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enabled`
+ +bool + + +
+ +(Optional) + +

+Determines if the backup progress should be synchronized. If set to true, +a sidecar container will be instantiated to synchronize the backup progress with the +Backup Custom Resource (CR) status. +

+ +
+ +`intervalSeconds`
+ +int32 + + +
+ +(Optional) + +

+Defines the interval in seconds for synchronizing the backup progress. +

+ +
+

+TargetInstance + +

+ +

+ +(Appears on:BackupMethodTPL, BackupPolicyTemplateSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`role`
+ +string + + +
+ + +

+Specifies the role to select one or more replicas for backup. +

+
    +
  • +If no replica with the specified role exists, the backup task will fail. +Special case: If there is only one replica in the cluster, it will be used for backup, +even if its role differs from the specified one. +For example, if you specify backing up on a secondary replica, but the cluster is single-node +with only one primary replica, the primary will be used for backup. +Future versions will address this special case using role priorities. +
  • +
  • +If multiple replicas satisfy the specified role, the choice (`Any` or `All`) will be made according to +the `strategy` field below. +
  • +
+ +
+ +`fallbackRole`
+ +string + + +
+ +(Optional) + +

+Specifies the fallback role to select one replica for backup, this only takes effect when the +`strategy` field below is set to `Any`. +

+ +
+ +`account`
+ +string + + +
+ +(Optional) + +

+If `backupPolicy.componentDefs` is set, this field is required to specify the system account name. +This account must match one listed in `componentDefinition.spec.systemAccounts[*].name`. +The corresponding secret created by this account is used to connect to the database. +

+ +
+ +`strategy`
+ + +PodSelectionStrategy + + + +
+ +(Optional) + +

+Specifies the PodSelectionStrategy to use when multiple pods are +selected for the backup target. +Valid values are: +

+
    +
  • +Any: Selects any one pod that matches the labelsSelector. +
  • +
  • +All: Selects all pods that match the labelsSelector. +
  • +
+ +
+ +`useParentSelectedPods`
+ +bool + + +
+ +(Optional) + +

+UseParentSelectedPods indicates whether to use the pods selected by the parent for backup. +If set to true, the backup will use the same pods selected by the parent. +And only takes effect when the ‘strategy’ is set to ‘Any’. +

+ +
+ +`containerPort`
+ + +ContainerPort + + + +
+ +(Optional) + +

+Specifies the container port in the target pod. +If not specified, the first container and its first port will be used. +

+ +
+

+TargetVolumeInfo + +

+ +

+ +(Appears on:BackupMethod, BackupMethodTPL) + +

+
+ +

+TargetVolumeInfo specifies the volumes and their mounts of the targeted application +that should be mounted in backup workload. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`volumes`
+ +[]string + + +
+ +(Optional) + +

+Specifies the list of volumes of targeted application that should be mounted +on the backup workload. +

+ +
+ +`volumeMounts`
+ + +[]Kubernetes core/v1.VolumeMount + + + +
+ +(Optional) + +

+Specifies the mount for the volumes specified in `volumes` section. +

+ +
+

+ValueFrom + +

+ +

+ +(Appears on:EnvVar) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`versionMapping`
+ + +[]VersionMapping + + + +
+ +(Optional) + +

+Determine the appropriate version of the backup tool image from service version. +

+ +
+

+VersionMapping + +

+ +

+ +(Appears on:ValueFrom) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceVersions`
+ +[]string + + +
+ + +

+Represents an array of the service version that can be mapped to the appropriate value. +Each name in the list can represent an exact name, a name prefix, or a regular expression pattern. +

+ +

+For example: +

+
    +
  • +“8.0.33”: Matches the exact name “8.0.33” +
  • +
  • +“8.0”: Matches all names starting with “8.0” +
  • +
  • +”^8.0.\d{1,2}$“: Matches all names starting with “8.0.” followed by one or two digits. +
  • +
+ +
+ +`mappedValue`
+ +string + + +
+ + +

+Specifies a mapping value based on service version. +Typically used to set up the tools image required for backup operations. +

+ +
+

+VolumeClaimRestorePolicy +(`string` alias) +

+ +

+ +(Appears on:PrepareDataConfig) + +

+
+ +

+VolumeClaimRestorePolicy defines restore policy for persistent volume claim. +Supported policies are as follows: +

+
    +
  1. +Parallel: parallel recovery of persistent volume claim. +
  2. +
  3. +Serial: restore the persistent volume claim in sequence, and wait until the previous persistent volume claim is restored before restoring a new one. +
  4. +
+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Parallel" +

+
+ +
+ +

+"Serial" +

+
+ +
+

+VolumeConfig + +

+ +

+ +(Appears on:PrepareDataConfig, RestoreVolumeClaim) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`volumeSource`
+ +string + + +
+ +(Optional) + +

+Describes the volume that will be restored from the specified volume of the backup targetVolumes. +This is required if the backup uses a volume snapshot. +

+ +
+ +`mountPath`
+ +string + + +
+ +(Optional) + +

+Specifies the path within the restoring container at which the volume should be mounted. +

+ +
+

+VolumeSnapshotStatus + +

+ +

+ +(Appears on:ActionStatus, BackupStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+The name of the volume snapshot. +

+ +
+ +`contentName`
+ +string + + +
+ +(Optional) + +

+The name of the volume snapshot content. +

+ +
+ +`volumeName`
+ +string + + +
+ +(Optional) + +

+The name of the volume. +

+ +
+ +`size`
+ +string + + +
+ +(Optional) + +

+The size of the volume snapshot. +

+ +
+ +`targetName`
+ +string + + +
+ + +

+Associates this volumeSnapshot with its corresponding target. +

+ +
+
+ +

+ +Generated with `gen-crd-api-reference-docs` + +

\ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/references/api-reference/operations.mdx b/docs/en/release-1_0_1/user_docs/references/api-reference/operations.mdx new file mode 100644 index 00000000..bee60182 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/api-reference/operations.mdx @@ -0,0 +1,7998 @@ +--- +title: Operations API Reference +description: Operations API Reference +keywords: [operations, api] +sidebar_position: 2 +sidebar_label: Operations +--- +
+ +

+Packages: +

+ +

operations.kubeblocks.io/v1alpha1

+Resource Types: + +

+OpsDefinition + +

+
+ +

+OpsDefinition is the Schema for the OpsDefinitions API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`operations.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`OpsDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +OpsDefinitionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`preConditions`
+ + +[]PreCondition + + + +
+ +(Optional) + +

+Specifies the preconditions that must be met to run the actions for the operation. +if set, it will check the condition before the Component runs this operation. +Example: +

+
+
+ preConditions:
+ - rule:
+     expression: '{{ eq .component.status.phase "Running" }}'
+     message: Component is not in Running status.
+
+
+ +
+ +`podInfoExtractors`
+ + +[]PodInfoExtractor + + + +
+ +(Optional) + +

+Specifies a list of PodInfoExtractor, each designed to select a specific Pod and extract selected runtime info +from its PodSpec. +The extracted information, such as environment variables, volumes and tolerations, are then injected into +Jobs or Pods that execute the OpsActions defined in `actions`. +

+ +
+ +`componentInfos`
+ + +[]ComponentInfo + + + +
+ +(Optional) + +

+Specifies a list of ComponentDefinition for Components associated with this OpsDefinition. +It also includes connection credentials (address and account) for each Component. +

+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Specifies the schema for validating the data types and value ranges of parameters in OpsActions before their usage. +

+ +
+ +`actions`
+ + +[]OpsAction + + + +
+ + +

+Specifies a list of OpsAction where each customized action is executed sequentially. +

+ +
+ +
+ +`status`
+ + +OpsDefinitionStatus + + + +
+ + +
+

+OpsRequest + +

+
+ +

+OpsRequest is the Schema for the opsrequests API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`operations.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`OpsRequest` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +OpsRequestSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`clusterName`
+ +string + + +
+ + +

+Specifies the name of the Cluster resource that this operation is targeting. +

+ +
+ +`cancel`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the current operation should be canceled and terminated gracefully if it’s in the +“Pending”, “Creating”, or “Running” state. +

+ +

+This field applies only to “VerticalScaling” and “HorizontalScaling” opsRequests. +

+ +

+Note: Setting `cancel` to true is irreversible; further modifications to this field are ineffective. +

+ +
+ +`force`
+ +bool + + +
+ +(Optional) + +

+Instructs the system to bypass pre-checks (including cluster state checks and customized pre-conditions hooks) +and immediately execute the opsRequest, except for the opsRequest of ‘Start’ type, which will still undergo +pre-checks even if `force` is true. +

+ +

+This is useful for concurrent execution of ‘VerticalScaling’ and ‘HorizontalScaling’ opsRequests. +By setting `force` to true, you can bypass the default checks and demand these opsRequests to run +simultaneously. +

+ +

+Note: Once set, the `force` field is immutable and cannot be updated. +

+ +
+ +`enqueueOnForce`
+ +bool + + +
+ +(Optional) + +

+Indicates whether opsRequest should continue to queue when ‘force’ is set to true. +

+ +
+ +`type`
+ + +OpsType + + + +
+ + +

+Specifies the type of this operation. Supported types include “Start”, “Stop”, “Restart”, “Switchover”, +“VerticalScaling”, “HorizontalScaling”, “VolumeExpansion”, “Reconfiguring”, “Upgrade”, “Backup”, “Restore”, +“Expose”, “RebuildInstance”, “Custom”. +

+ +

+Note: This field is immutable once set. +

+ +
+ +`ttlSecondsAfterSucceed`
+ +int32 + + +
+ +(Optional) + +

+Specifies the duration in seconds that an OpsRequest will remain in the system after successfully completing +(when `opsRequest.status.phase` is “Succeed”) before automatic deletion. +

+ +
+ +`ttlSecondsAfterUnsuccessfulCompletion`
+ +int32 + + +
+ +(Optional) + +

+Specifies the duration in seconds that an OpsRequest will remain in the system after completion +for any phase other than “Succeed” (e.g., “Failed”, “Cancelled”, “Aborted”) before automatic deletion. +

+ +
+ +`preConditionDeadlineSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum time in seconds that the OpsRequest will wait for its pre-conditions to be met +before it aborts the operation. +If set to 0 (default), pre-conditions must be satisfied immediately for the OpsRequest to proceed. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum duration (in seconds) that an opsRequest is allowed to run. +If the opsRequest runs longer than this duration, its phase will be marked as Aborted. +If this value is not set or set to 0, the timeout will be ignored and the opsRequest will run indefinitely. +

+ +
+ +`SpecificOpsRequest`
+ + +SpecificOpsRequest + + + +
+ + +

+ +(Members of `SpecificOpsRequest` are embedded into this type.) + +

+ +

+Exactly one of its members must be set. +

+ +
+ +
+ +`status`
+ + +OpsRequestStatus + + + +
+ + +
+

+ActionTask + +

+ +

+ +(Appears on:ProgressStatusDetail) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`objectKey`
+ +string + + +
+ + +

+Represents the name of the task. +

+ +
+ +`namespace`
+ +string + + +
+ + +

+Represents the namespace where the task is deployed. +

+ +
+ +`status`
+ + +ActionTaskStatus + + + +
+ + +

+Indicates the current status of the task, including “Processing”, “Failed”, “Succeed”. +

+ +
+ +`targetPodName`
+ +string + + +
+ +(Optional) + +

+The name of the Pod that the task is associated with or operates on. +

+ +
+ +`retries`
+ +int32 + + +
+ +(Optional) + +

+The count of retry attempts made for this task. +

+ +
+

+ActionTaskStatus +(`string` alias) +

+ +

+ +(Appears on:ActionTask) + +

+
+ +

+ActionTaskStatus defines the status of the task. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Failed" +

+
+ +
+ +

+"Processing" +

+
+ +
+ +

+"Succeed" +

+
+ +
+

+Backup + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Backup custom resource. +

+ +
+ +`backupPolicyName`
+ +string + + +
+ +(Optional) + +

+Indicates the name of the BackupPolicy applied to perform this Backup. +

+ +
+ +`backupMethod`
+ +string + + +
+ +(Optional) + +

+Specifies the name of BackupMethod. +The specified BackupMethod must be defined in the BackupPolicy. +

+ +
+ +`deletionPolicy`
+ +string + + +
+ +(Optional) + +

+Determines whether the backup contents stored in backup repository +should be deleted when the Backup custom resource is deleted. +Supported values are `Retain` and `Delete`. +- `Retain` means that the backup content and its physical snapshot on backup repository are kept. +- `Delete` means that the backup content and its physical snapshot on backup repository are deleted. +

+ +
+ +`retentionPeriod`
+ +string + + +
+ +(Optional) + +

+Determines the duration for which the Backup custom resources should be retained. +

+ +

+The controller will automatically remove all Backup objects that are older than the specified RetentionPeriod. +For example, RetentionPeriod of `30d` will keep only the Backup objects of last 30 days. +Sample duration format: +

+
    +
  • +years: 2y +
  • +
  • +months: 6mo +
  • +
  • +days: 30d +
  • +
  • +hours: 12h +
  • +
  • +minutes: 30m +
  • +
+ +

+You can also combine the above durations. For example: 30d12h30m. +If not set, the Backup objects will be kept forever. +

+ +

+If the `deletionPolicy` is set to ‘Delete’, then the associated backup data will also be deleted +along with the Backup object. +Otherwise, only the Backup custom resource will be deleted. +

+ +
+ +`parentBackupName`
+ +string + + +
+ +(Optional) + +

+If the specified BackupMethod is incremental, `parentBackupName` is required. +

+ +
+ +`parameters`
+ +[]github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1.ParameterPair + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+

+BackupRefSpec + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ref`
+ + +RefNamespaceName + + + +
+ +(Optional) + +

+Refers to a reference backup that needs to be restored. +

+ +
+

+CompletionProbe + +

+ +

+ +(Appears on:OpsResourceModifierAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`initialDelaySeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds to wait after the resource has been patched before initiating completion probes. +The default value is 5 seconds, with a minimum value of 1. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds after which the probe times out. +The default value is 60 seconds, with a minimum value of 1. +

+ +
+ +`periodSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the frequency (in seconds) at which the probe should be performed. +The default value is 5 seconds, with a minimum value of 1. +

+ +
+ +`matchExpressions`
+ + +MatchExpressions + + + +
+ + +

+Executes expressions regularly, based on the value of PeriodSeconds, to determine if the action has been completed. +

+ +
+

+ComponentInfo + +

+ +

+ +(Appears on:OpsDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentDefinitionName`
+ +string + + +
+ + +

+Specifies the name of the ComponentDefinition. +The name can represent an exact name, a name prefix, or a regular expression pattern. +

+ +

+For example: +

+
    +
  • +“mysql-8.0.30-v1alpha1”: Matches the exact name “mysql-8.0.30-v1alpha1” +
  • +
  • +“mysql-8.0.30”: Matches all names starting with “mysql-8.0.30” +
  • +
  • +“^mysql-8.0.\d{1,2}$”: Matches all names starting with “mysql-8.0.” followed by one or two digits. +
  • +
+ +
+ +`accountName`
+ +string + + +
+ +(Optional) + +

+Specifies the account name associated with the Component. +If set, the corresponding account username and password are injected into containers’ environment variables +`KB_ACCOUNT_USERNAME` and `KB_ACCOUNT_PASSWORD`. +

+ +
+ +`serviceName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Service. +If set, the service name is injected as the `KB_COMP_SVC_NAME` environment variable in the containers, +and each service port is mapped to a corresponding environment variable named `KB_COMP_SVC_PORT_$(portName)`. +The `portName` is transformed by replacing ‘-’ with ‘_’ and converting to uppercase. +

+ +
+

+ComponentOps + +

+ +

+ +(Appears on:CustomOpsComponent, HorizontalScaling, RebuildInstance, Reconfigure, SpecificOpsRequest, UpgradeComponent, VerticalScaling, VolumeExpansion) + +

+
+ +

+ComponentOps specifies the Component to be operated on. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ +string + + +
+ + +

+Specifies the name of the Component as defined in the cluster.spec +

+ +
+

+CustomOps + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`opsDefinitionName`
+ +string + + +
+ + +

+Specifies the name of the OpsDefinition. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ + +

+Specifies the name of the ServiceAccount to be used for executing the custom operation. +

+ +
+ +`maxConcurrentComponents`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Specifies the maximum number of components to be operated on concurrently to mitigate performance impact +on clusters with multiple components. +

+ +

+It accepts an absolute number (e.g., 5) or a percentage of components to execute in parallel (e.g., “10%”). +Percentages are rounded up to the nearest whole number of components. +For example, if “10%” results in less than one, it rounds up to 1. +

+ +

+When unspecified, all components are processed simultaneously by default. +

+ +

+Note: This feature is not implemented yet. +

+ +
+ +`components`
+ + +[]CustomOpsComponent + + + +
+ + +

+Specifies the components and their parameters for executing custom actions as defined in OpsDefinition. +Requires at least one component. +

+ +
+

+CustomOpsComponent + +

+ +

+ +(Appears on:CustomOps) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`parameters`
+ + +[]Parameter + + + +
+ +(Optional) + +

+Specifies the parameters that match the schema specified in the `opsDefinition.spec.parametersSchema`. +

+ +
+

+EnvVarRef + +

+ +

+ +(Appears on:OpsVarSource) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`targetContainerName`
+ +string + + +
+ +(Optional) + +

+Specifies the container name in the target Pod. +If not specified, the first container will be used by default. +

+ +
+ +`envName`
+ +string + + +
+ + +

+Defines the name of the environment variable. +This name can originate from an ‘env’ entry or be a data key from an ‘envFrom’ source. +

+ +
+

+Expose + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ +string + + +
+ + +

+Specifies the name of the Component. +

+ +
+ +`switch`
+ + +ExposeSwitch + + + +
+ + +

+Indicates whether the services will be exposed. +‘Enable’ exposes the services, while ‘Disable’ removes the exposed Service. +

+ +
+ +`services`
+ + +[]OpsService + + + +
+ + +

+Specifies a list of OpsService. +When an OpsService is exposed, a corresponding ClusterService will be added to `cluster.spec.services`. +On the other hand, when an OpsService is unexposed, the corresponding ClusterService will be removed +from `cluster.spec.services`. +

+ +

+Note: If `componentName` is not specified, the `ports` and `selector` fields must be provided +in each OpsService definition. +

+ +
+

+ExposeSwitch +(`string` alias) +

+ +

+ +(Appears on:Expose) + +

+
+ +

+ExposeSwitch Specifies the switch for the expose operation. This switch can be used to enable or disable the expose operation. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Disable" +

+
+ +
+ +

+"Enable" +

+
+ +
+

+FailurePolicyType +(`string` alias) +

+ +

+ +(Appears on:OpsAction) + +

+
+ +

+FailurePolicyType specifies the type of failure policy. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Fail" +

+
+ +

+FailurePolicyFail means that an error will be reported. +

+ +
+ +

+"Ignore" +

+
+ +

+FailurePolicyIgnore means that an error will be ignored but logged. +

+ +
+

+HorizontalScaling + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+ +

+HorizontalScaling defines the parameters of a horizontal scaling operation. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`shards`
+ +int32 + + +
+ + +

+Specifies the desired number of shards for the component. +This parameter is mutually exclusive with other parameters. +

+ +
+ +`scaleOut`
+ + +ScaleOut + + + +
+ +(Optional) + +

+Specifies the replica changes for scaling out components and instance templates, +and brings offline instances back online. Can be used in conjunction with the “scaleIn” operation. +Note: Any configuration that deletes instances is considered invalid. +

+ +
+ +`scaleIn`
+ + +ScaleIn + + + +
+ +(Optional) + +

+Specifies the replica changes for scaling in components and instance templates, +and takes specified instances offline. Can be used in conjunction with the “scaleOut” operation. +Note: Any configuration that creates instances is considered invalid. +

+ +
+

+Instance + +

+ +

+ +(Appears on:RebuildInstance) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Pod name of the instance. +

+ +
+ +`targetNodeName`
+ +string + + +
+ +(Optional) + +

+The instance will rebuild on the specified node. +If not set, it will rebuild on a random node. +

+ +
+

+InstanceReplicasTemplate + +

+ +

+ +(Appears on:ReplicaChanger) + +

+
+ +

+InstanceReplicasTemplate defines the template for instance replicas. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the instance template. +

+ +
+ +`replicaChanges`
+ +int32 + + +
+ + +

+Specifies the replica changes for the instance template. +

+ +
+

+InstanceResourceTemplate + +

+ +

+ +(Appears on:VerticalScaling) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Refer to the instance template name of the component or sharding. +

+ +
+ +`ResourceRequirements`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ + +

+ +(Members of `ResourceRequirements` are embedded into this type.) + +

+ +

+Defines the computational resource size for vertical scaling. +

+ +
+

+InstanceVolumeClaimTemplate + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Refer to the instance template name of the component or sharding. +

+ +
+ +`volumeClaimTemplates`
+ + +[]OpsRequestVolumeClaimTemplate + + + +
+ + +

+volumeClaimTemplates specifies the storage size and volumeClaimTemplate name. +

+ +
+

+JSONPatchOperation + +

+ +

+ +(Appears on:OpsResourceModifierAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`op`
+ +string + + +
+ + +

+Specifies the type of JSON patch operation. It supports the following values: ‘add’, ‘remove’, ‘replace’. +

+ +
+ +`path`
+ +string + + +
+ + +

+Specifies the json patch path. +

+ +
+ +`value`
+ +string + + +
+ + +

+Specifies the value to be used in the JSON patch operation. +

+ +
+

+LastComponentConfiguration + +

+ +

+ +(Appears on:LastConfiguration) + +

+
+ +

+LastComponentConfiguration can be used to track and compare the desired state of the Component over time. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Records the `replicas` of the Component prior to any changes. +

+ +
+ +`shards`
+ +int32 + + +
+ +(Optional) + +

+Records the `shards` of the Component prior to any changes. +

+ +
+ +`ResourceRequirements`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ + +

+ +(Members of `ResourceRequirements` are embedded into this type.) + +

+(Optional) + +

+Records the resources of the Component prior to any changes. +

+ +
+ +`volumeClaimTemplates`
+ + +[]OpsRequestVolumeClaimTemplate + + + +
+ +(Optional) + +

+Records volumes’ storage size of the Component prior to any changes. +

+ +
+ +`services`
+ +[]github.com/apecloud/kubeblocks/apis/apps/v1.ClusterComponentService + + +
+ +(Optional) + +

+Records the ClusterComponentService list of the Component prior to any changes. +

+ +
+ +`instances`
+ +[]github.com/apecloud/kubeblocks/apis/apps/v1.InstanceTemplate + + +
+ +(Optional) + +

+Records the InstanceTemplate list of the Component prior to any changes. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Records the offline instances of the Component prior to any changes. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+Records the version of the Service expected to be provisioned by this Component prior to any changes. +

+ +
+ +`componentDefinitionName`
+ +string + + +
+ +(Optional) + +

+Records the name of the ComponentDefinition prior to any changes. +

+ +
+

+LastConfiguration + +

+ +

+ +(Appears on:OpsRequestStatus) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`components`
+ + +map[string]github.com/apecloud/kubeblocks/apis/operations/v1alpha1.LastComponentConfiguration + + + +
+ +(Optional) + +

+Records the configuration of each Component prior to any changes. +

+ +
+

+MatchExpressions + +

+ +

+ +(Appears on:CompletionProbe) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`failure`
+ +string + + +
+ +(Optional) + +

+Specifies a failure condition for an action using a Go template expression. +Should evaluate to either `true` or `false`. +The current resource object is parsed into the Go template. +for example, you can use ‘{{ eq .spec.replicas 1 }}’. +

+ +
+ +`success`
+ +string + + +
+ + +

+Specifies a success condition for an action using a Go template expression. +Should evaluate to either `true` or `false`. +The current resource object is parsed into the Go template. +for example, using ‘{{ eq .spec.replicas 1 }}’ +

+ +
+

+OpsAction + +

+ +

+ +(Appears on:OpsDefinitionSpec) + +

+
+ +

+OpsAction specifies a custom action defined in OpsDefinition for execution in a “Custom” OpsRequest. +

+ +

+OpsAction can be of three types: +

+
    +
  • +workload: Creates a Job or Pod to run custom scripts, ideal for isolated or long-running tasks. +
  • +
  • +exec: Executes commands directly within an existing container using the kubectl exec interface, +suitable for immediate, short-lived operations. +
  • +
  • +resourceModifier: Modifies a K8s object using JSON patches, useful for updating the spec of some resource. +
  • +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the OpsAction. +

+ +
+ +`failurePolicy`
+ + +FailurePolicyType + + + +
+ +(Optional) + +

+Specifies the failure policy of the OpsAction. +Valid values are: +

+
    +
  • +“Fail”: Marks the entire OpsRequest as failed if the action fails. +
  • +
  • +“Ignore”: The OpsRequest continues processing despite the failure of the action. +
  • +
+ +
+ +`parameters`
+ +[]string + + +
+ +(Optional) + +

+Specifies the parameters for the OpsAction. Their usage varies based on the action type: +

+
    +
  • +For ‘workload’ or ‘exec’ actions, parameters are injected as environment variables. +
  • +
  • +For ‘resourceModifier’ actions, parameter can be referenced using $() in fields +`resourceModifier.completionProbe.matchExpressions` and `resourceModifier.jsonPatches[*].value`. +
  • +
+ +
+ +`workload`
+ + +OpsWorkloadAction + + + +
+ +(Optional) + +

+Specifies the configuration for a ‘workload’ action. +This action leads to the creation of a K8s workload, such as a Pod or Job, to execute specified tasks. +

+ +
+ +`exec`
+ + +OpsExecAction + + + +
+ +(Optional) + +

+Specifies the configuration for a ‘exec’ action. +It creates a Pod and invokes a ‘kubectl exec’ to run command inside a specified container with the target Pod. +

+ +
+ +`resourceModifier`
+ + +OpsResourceModifierAction + + + +
+ +(Optional) + +

+Specifies the configuration for a ‘resourceModifier’ action. +This action allows for modifications to existing K8s objects. +

+ +

+Note: This feature has not been implemented yet. +

+ +
+

+OpsDefinitionSpec + +

+ +

+ +(Appears on:OpsDefinition) + +

+
+ +

+OpsDefinitionSpec defines the desired state of OpsDefinition. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`preConditions`
+ + +[]PreCondition + + + +
+ +(Optional) + +

+Specifies the preconditions that must be met to run the actions for the operation. +If set, it will check the condition before the Component runs this operation. +Example: +

+
+
+ preConditions:
+ - rule:
+     expression: '{{ eq .component.status.phase "Running" }}'
+     message: Component is not in Running status.
+
+
+ +
+ +`podInfoExtractors`
+ + +[]PodInfoExtractor + + + +
+ +(Optional) + +

+Specifies a list of PodInfoExtractor, each designed to select a specific Pod and extract selected runtime info +from its PodSpec. +The extracted information, such as environment variables, volumes and tolerations, are then injected into +Jobs or Pods that execute the OpsActions defined in `actions`. +

+ +
+ +`componentInfos`
+ + +[]ComponentInfo + + + +
+ +(Optional) + +

+Specifies a list of ComponentDefinition for Components associated with this OpsDefinition. +It also includes connection credentials (address and account) for each Component. +

+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Specifies the schema for validating the data types and value ranges of parameters in OpsActions before their usage. +

+ +
+ +`actions`
+ + +[]OpsAction + + + +
+ + +

+Specifies a list of OpsAction where each customized action is executed sequentially. +

+ +
+

+OpsDefinitionStatus + +

+ +

+ +(Appears on:OpsDefinition) + +

+
+ +

+OpsDefinitionStatus defines the observed state of OpsDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the most recent generation observed of this OpsDefinition. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Represents the current state of the OpsDefinition. +Valid values are “”, “Available”, “Unavailable”. +When it equals to “Available”, the OpsDefinition is ready and can be used in a “Custom” OpsRequest. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+

+OpsEnvVar + +

+ +

+ +(Appears on:PodInfoExtractor) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the environment variable to be injected into Pods executing OpsActions. +It must conform to the C_IDENTIFIER format, which includes only alphanumeric characters and underscores, and cannot begin with a digit. +

+ +
+ +`valueFrom`
+ + +OpsVarSource + + + +
+ + +

+Specifies the source of the environment variable’s value. +

+ +
+

+OpsExecAction + +

+ +

+ +(Appears on:OpsAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podInfoExtractorName`
+ +string + + +
+ + +

+Specifies a PodInfoExtractor defined in the `opsDefinition.spec.podInfoExtractors`. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of retries allowed before marking the action as failed. +

+ +
+ +`command`
+ +[]string + + +
+ + +

+The command to be executed via ‘kubectl exec --’. +

+ +
+ +`containerName`
+ +string + + +
+ +(Optional) + +

+The name of the container in the target pod where the command should be executed. +This corresponds to the `-c {containerName}` option in `kubectl exec`. +

+ +

+If not set, the first container is used. +

+ +
+

+OpsPhase +(`string` alias) +

+ +

+ +(Appears on:OpsRequestStatus) + +

+
+ +

+OpsPhase defines opsRequest phase. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Aborted" +

+
+ +
+ +

+"Cancelled" +

+
+ +
+ +

+"Cancelling" +

+
+ +
+ +

+"Creating" +

+
+ +
+ +

+"Failed" +

+
+ +
+ +

+"Pending" +

+
+ +
+ +

+"Running" +

+
+ +
+ +

+"Succeed" +

+
+ +
+

+OpsRecorder + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+name OpsRequest name +

+ +
+ +`type`
+ + +OpsType + + + +
+ + +

+opsRequest type +

+ +
+ +`inQueue`
+ +bool + + +
+ + +

+indicates whether the current opsRequest is in the queue +

+ +
+ +`queueBySelf`
+ +bool + + +
+ + +

+indicates that the operation is queued for execution within its own-type scope. +

+ +
+

+OpsRequestBehaviour + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`FromClusterPhases`
+ +[]github.com/apecloud/kubeblocks/apis/apps/v1.ClusterPhase + + +
+ + +
+ +`ToClusterPhase`
+ +github.com/apecloud/kubeblocks/apis/apps/v1.ClusterPhase + + +
+ + +
+

+OpsRequestComponentStatus + +

+ +

+ +(Appears on:OpsRequestStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ +github.com/apecloud/kubeblocks/apis/apps/v1.ComponentPhase + + +
+ +(Optional) + +

+Records the current phase of the Component, mirroring `cluster.status.components[componentName].phase`. +

+ +
+ +`lastFailedTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the timestamp when the Component last transitioned to a “Failed” phase. +

+ +
+ +`preCheck`
+ + +PreCheckResult + + + +
+ +(Optional) + +

+Records the result of the preConditions check of the opsRequest, which determines subsequent steps. +

+ +
+ +`progressDetails`
+ + +[]ProgressStatusDetail + + + +
+ +(Optional) + +

+Describes the progress details of objects or actions associated with the Component. +

+ +
+ +`reason`
+ +string + + +
+ +(Optional) + +

+Provides an explanation for the Component being in its current state. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a human-readable message indicating details about this operation. +

+ +
+

+OpsRequestSpec + +

+ +

+ +(Appears on:OpsRequest) + +

+
+ +

+OpsRequestSpec defines the desired state of OpsRequest +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterName`
+ +string + + +
+ + +

+Specifies the name of the Cluster resource that this operation is targeting. +

+ +
+ +`cancel`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the current operation should be canceled and terminated gracefully if it’s in the +“Pending”, “Creating”, or “Running” state. +

+ +

+This field applies only to “VerticalScaling” and “HorizontalScaling” opsRequests. +

+ +

+Note: Setting `cancel` to true is irreversible; further modifications to this field are ineffective. +

+ +
+ +`force`
+ +bool + + +
+ +(Optional) + +

+Instructs the system to bypass pre-checks (including cluster state checks and customized pre-conditions hooks) +and immediately execute the opsRequest, except for the opsRequest of ‘Start’ type, which will still undergo +pre-checks even if `force` is true. +

+ +

+This is useful for concurrent execution of ‘VerticalScaling’ and ‘HorizontalScaling’ opsRequests. +By setting `force` to true, you can bypass the default checks and demand these opsRequests to run +simultaneously. +

+ +

+Note: Once set, the `force` field is immutable and cannot be updated. +

+ +
+ +`enqueueOnForce`
+ +bool + + +
+ +(Optional) + +

+Indicates whether opsRequest should continue to queue when ‘force’ is set to true. +

+ +
+ +`type`
+ + +OpsType + + + +
+ + +

+Specifies the type of this operation. Supported types include “Start”, “Stop”, “Restart”, “Switchover”, +“VerticalScaling”, “HorizontalScaling”, “VolumeExpansion”, “Reconfiguring”, “Upgrade”, “Backup”, “Restore”, +“Expose”, “RebuildInstance”, “Custom”. +

+ +

+Note: This field is immutable once set. +

+ +
+ +`ttlSecondsAfterSucceed`
+ +int32 + + +
+ +(Optional) + +

+Specifies the duration in seconds that an OpsRequest will remain in the system after successfully completing +(when `opsRequest.status.phase` is “Succeed”) before automatic deletion. +

+ +
+ +`ttlSecondsAfterUnsuccessfulCompletion`
+ +int32 + + +
+ +(Optional) + +

+Specifies the duration in seconds that an OpsRequest will remain in the system after completion +for any phase other than “Succeed” (e.g., “Failed”, “Cancelled”, “Aborted”) before automatic deletion. +

+ +
+ +`preConditionDeadlineSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum time in seconds that the OpsRequest will wait for its pre-conditions to be met +before it aborts the operation. +If set to 0 (default), pre-conditions must be satisfied immediately for the OpsRequest to proceed. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum duration (in seconds) that an opsRequest is allowed to run. +If the opsRequest runs longer than this duration, its phase will be marked as Aborted. +If this value is not set or set to 0, the timeout will be ignored and the opsRequest will run indefinitely. +

+ +
+ +`SpecificOpsRequest`
+ + +SpecificOpsRequest + + + +
+ + +

+ +(Members of `SpecificOpsRequest` are embedded into this type.) + +

+ +

+Exactly one of its members must be set. +

+ +
+

+OpsRequestStatus + +

+ +

+ +(Appears on:OpsRequest) + +

+
+ +

+OpsRequestStatus represents the observed state of an OpsRequest. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterGeneration`
+ +int64 + + +
+ +(Optional) + +

+Records the cluster generation after the OpsRequest action has been handled. +

+ +
+ +`phase`
+ + +OpsPhase + + + +
+ + +

+Represents the phase of the OpsRequest. +Possible values include “Pending”, “Creating”, “Running”, “Cancelling”, “Cancelled”, “Failed”, “Succeed”. +

+ +
+ +`progress`
+ +string + + +
+ + +

+Represents the progress of the OpsRequest. +

+ +
+ +`lastConfiguration`
+ + +LastConfiguration + + + +
+ +(Optional) + +

+Records the configuration prior to any changes. +

+ +
+ +`components`
+ + +map[string]github.com/apecloud/kubeblocks/apis/operations/v1alpha1.OpsRequestComponentStatus + + + +
+ +(Optional) + +

+Records the status information of Components changed due to the OpsRequest. +

+ +
+ +`extras`
+ +[]string + + +
+ + +

+A collection of additional key-value pairs that provide supplementary information for the OpsRequest. +

+ +
+ +`startTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time when the OpsRequest started processing. +

+ +
+ +`completionTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time when the OpsRequest was completed. +

+ +
+ +`cancelTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time when the OpsRequest was cancelled. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Describes the detailed status of the OpsRequest. +Possible condition types include “Cancelled”, “WaitForProgressing”, “Validated”, “Succeed”, “Failed”, “Restarting”, +“VerticalScaling”, “HorizontalScaling”, “VolumeExpanding”, “Reconfigure”, “Switchover”, “Stopping”, “Starting”, +“VersionUpgrading”, “Exposing”, “Backup”, “InstancesRebuilding”, “CustomOperation”. +

+ +
+

+OpsRequestVolumeClaimTemplate + +

+ +

+ +(Appears on:InstanceVolumeClaimTemplate, LastComponentConfiguration, VolumeExpansion) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`storage`
+ + +Kubernetes resource.Quantity + + + +
+ + +

+Specifies the desired storage size for the volume. +

+ +
+ +`name`
+ +string + + +
+ + +

+Specify the name of the volumeClaimTemplate in the Component. +The specified name must match one of the volumeClaimTemplates defined +in the `clusterComponentSpec.volumeClaimTemplates` field. +

+ +
+

+OpsResourceModifierAction + +

+ +

+ +(Appears on:OpsAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`resource`
+ + +TypedObjectRef + + + +
+ + +

+Specifies the K8s object that is to be updated. +

+ +
+ +`jsonPatches`
+ + +[]JSONPatchOperation + + + +
+ + +

+Specifies a list of patches for modifying the object. +

+ +
+ +`completionProbe`
+ + +CompletionProbe + + + +
+ + +

+Specifies a method to determine if the action has been completed. +

+ +

+Note: This feature has not been implemented yet. +

+ +
+

+OpsService + +

+ +

+ +(Appears on:Expose) + +

+
+ +

+OpsService represents the parameters to dynamically create or remove a ClusterService in the `cluster.spec.services` array. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the Service. This name is used to set `clusterService.name`. +

+ +

+Note: This field cannot be updated. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Contains cloud provider related parameters if ServiceType is LoadBalancer. +

+ +

+More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer. +

+ +
+ +`ports`
+ + +[]Kubernetes core/v1.ServicePort + + + +
+ +(Optional) + +

+Specifies Port definitions that are to be exposed by a ClusterService. +

+ +

+If not specified, the Port definitions from non-NodePort and non-LoadBalancer type ComponentService +defined in the ComponentDefinition (`componentDefinition.spec.services`) will be used. +If no matching ComponentService is found, the expose operation will fail. +

+ +

+More info: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports +

+ +
+ +`roleSelector`
+ +string + + +
+ +(Optional) + +

+Specifies a role to target with the service. +If specified, the service will only be exposed to pods with the matching role. +

+ +

+Note: If the component has roles, at least one of ‘roleSelector’ or ‘podSelector’ must be specified. +If both are specified, a pod must match both conditions to be selected. +

+ +
+ +`podSelector`
+ +map[string]string + + +
+ +(Optional) + +

+Routes service traffic to pods with matching label keys and values. +If specified, the service will only be exposed to pods matching the selector. +

+ +

+Note: If the component has roles, at least one of ‘roleSelector’ or ‘podSelector’ must be specified. +If both are specified, a pod must match both conditions to be selected. +

+ +
+ +`serviceType`
+ + +Kubernetes core/v1.ServiceType + + + +
+ +(Optional) + +

+Determines how the Service is exposed. Defaults to ‘ClusterIP’. +Valid options are `ClusterIP`, `NodePort`, and `LoadBalancer`. +

+
    +
  • +`ClusterIP`: allocates a cluster-internal IP address for load-balancing to endpoints. +Endpoints are determined by the selector or if that is not specified, +they are determined by manual construction of an Endpoints object or EndpointSlice objects. +
  • +
  • +`NodePort`: builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. +
  • +
  • +`LoadBalancer`: builds on NodePort and creates an external load-balancer (if supported in the current cloud) +which routes to the same endpoints as the clusterIP. +
  • +
+ +

+Note: although K8s Service type allows the ‘ExternalName’ type, it is not a valid option for the expose operation. +

+ +

+For more info, see: +https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types. +

+ +
+ +`ipFamilies`
+ + +[]Kubernetes core/v1.IPFamily + + + +
+ +(Optional) + +

+A list of IP families (e.g., IPv4, IPv6) assigned to this Service. +

+ +

+Usually assigned automatically based on the cluster configuration and the `ipFamilyPolicy` field. +If specified manually, the requested IP family must be available in the cluster and allowed by the `ipFamilyPolicy`. +If the requested IP family is not available or not allowed, the Service creation will fail. +

+ +

+Valid values: +

+
    +
  • +“IPv4” +
  • +
  • +“IPv6” +
  • +
+ +

+This field may hold a maximum of two entries (dual-stack families, in either order). +

+ +

+Common combinations of `ipFamilies` and `ipFamilyPolicy` are: +

+
    +
  • +ipFamilies=[] + ipFamilyPolicy=“PreferDualStack” : +The Service prefers dual-stack but can fall back to single-stack if the cluster does not support dual-stack. +The IP family is automatically assigned based on the cluster configuration. +
  • +
  • +ipFamilies=[“IPV4”,“IPV6”] + ipFamilyPolicy=“RequiredDualStack” : +The Service requires dual-stack and will only be created if the cluster supports both IPv4 and IPv6. +The primary IP family is IPV4. +
  • +
  • +ipFamilies=[“IPV6”,“IPV4”] + ipFamilyPolicy=“RequiredDualStack” : +The Service requires dual-stack and will only be created if the cluster supports both IPv4 and IPv6. +The primary IP family is IPV6. +
  • +
  • +ipFamilies=[“IPV4”] + ipFamilyPolicy=“SingleStack” : +The Service uses a single-stack with IPv4 only. +
  • +
  • +ipFamilies=[“IPV6”] + ipFamilyPolicy=“SingleStack” : +The Service uses a single-stack with IPv6 only. +
  • +
+ +
+ +`ipFamilyPolicy`
+ + +Kubernetes core/v1.IPFamilyPolicy + + + +
+ +(Optional) + +

+Specifies whether the Service should use a single IP family (SingleStack) or two IP families (DualStack). +

+ +

+Possible values: +

+
    +
  • +‘SingleStack’ (default) : The Service uses a single IP family. +If no value is provided, IPFamilyPolicy defaults to SingleStack. +
  • +
  • +‘PreferDualStack’ : The Service prefers to use two IP families on dual-stack configured clusters +or a single IP family on single-stack clusters. +
  • +
  • +‘RequiredDualStack’ : The Service requires two IP families on dual-stack configured clusters. +If the cluster is not configured for dual-stack, the Service creation fails. +
  • +
+ +
+

+OpsType +(`string` alias) +

+ +

+ +(Appears on:OpsRecorder, OpsRequestSpec) + +

+
+ +

+OpsType defines operation types. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Backup" +

+
+ +
+ +

+"Custom" +

+
+ +

+RebuildInstance: rebuilding an instance is very useful when a node is offline or an instance is unrecoverable. +

+ +
+ +

+"Expose" +

+
+ +

+StartType: the start operation will start the pods which were deleted by the stop operation. +

+ +
+ +

+"HorizontalScaling" +

+
+ +
+ +

+"RebuildInstance" +

+
+ +
+ +

+"Reconfiguring" +

+
+ +
+ +

+"Restart" +

+
+ +
+ +

+"Restore" +

+
+ +
+ +

+"Start" +

+
+ +

+StopType: the stop operation will delete all pods in a cluster concurrently. +

+ +
+ +

+"Stop" +

+
+ +

+RestartType: the restart operation is a special case of the rolling update operation. +

+ +
+ +

+"Switchover" +

+
+ +
+ +

+"Upgrade" +

+
+ +
+ +

+"VerticalScaling" +

+
+ +
+ +

+"VolumeExpansion" +

+
+ +
+

+OpsVarSource + +

+ +

+ +(Appears on:OpsEnvVar) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`envRef`
+ + +EnvVarRef + + + +
+ +(Optional) + +

+Specifies a reference to a specific environment variable within a container. +Used to specify the source of the variable, which can be either “env” or “envFrom”. +

+ +
+ +`fieldPath`
+ + +Kubernetes core/v1.ObjectFieldSelector + + + +
+ +(Optional) + +

+Represents the JSONPath expression pointing to the specific data within the JSON structure of the target Pod. +It is used to extract precise data locations for operations on the Pod. +

+ +
+

+OpsWorkloadAction + +

+ +

+ +(Appears on:OpsAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`type`
+ + +OpsWorkloadType + + + +
+ + +

+Defines the workload type of the action. Valid values include “Job” and “Pod”. +

+
    +
  • +“Job”: Creates a Job to execute the action. +
  • +
  • +“Pod”: Creates a Pod to execute the action. +Note: unlike Jobs, manually deleting a Pod does not affect the `backoffLimit`. +
  • +
+ +
+ +`podInfoExtractorName`
+ +string + + +
+ + +

+Specifies a PodInfoExtractor defined in the `opsDefinition.spec.podInfoExtractors`. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of retries allowed before marking the action as failed. +

+ +
+ +`podSpec`
+ + +Kubernetes core/v1.PodSpec + + + +
+ + +

+Specifies the PodSpec of the ‘workload’ action. +

+ +
+

+OpsWorkloadType +(`string` alias) +

+ +

+ +(Appears on:OpsWorkloadAction) + +

+
+ +

+OpsWorkloadType defines the type of workload (Job or Pod) used to execute an OpsAction. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Job" +

+
+ +
+ +

+"Pod" +

+
+ +
+

+Parameter + +

+ +

+ +(Appears on:CustomOpsComponent) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the identifier of the parameter as defined in the OpsDefinition. +

+ +
+ +`value`
+ +string + + +
+ + +

+Holds the data associated with the parameter. +If the parameter type is an array, the format should be “v1,v2,v3”. +

+ +
+ +`valueFrom`
+ + +ParameterSource + + + +
+ + +

+Source for the parameter’s value. Cannot be used if value is not empty. +

+ +
+

+ParameterPair + +

+ +

+ +(Appears on:Reconfigure) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`key`
+ +string + + +
+ + +

+Represents the name of the parameter that is to be updated. +

+ +
+ +`value`
+ +string + + +
+ +(Optional) + +

+Represents the parameter values that are to be updated. +If set to nil, the parameter defined by the Key field will be removed from the configuration file. +

+ +
+

+ParameterSource + +

+ +

+ +(Appears on:Parameter) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`configMapKeyRef`
+ + +Kubernetes core/v1.ConfigMapKeySelector + + + +
+ +(Optional) + +

+Selects a key of a ConfigMap. +

+ +
+ +`secretKeyRef`
+ + +Kubernetes core/v1.SecretKeySelector + + + +
+ +(Optional) + +

+Selects a key of a Secret. +

+ +
+

+ParametersSchema + +

+ +

+ +(Appears on:OpsDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`openAPIV3Schema`
+ + +Kubernetes api extensions v1.JSONSchemaProps + + + +
+ +(Optional) + +

+Defines the schema for parameters using the OpenAPI v3. +The supported property types include: +- string +- number +- integer +- array: Note that only items of string type are supported. +

+ +
+

+Phase +(`string` alias) +

+ +

+ +(Appears on:OpsDefinitionStatus) + +

+
+ +

+Phase represents the current status of the OpsDefinition CR. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +

+AvailablePhase indicates that the object is in an available state. +

+ +
+ +

+"Unavailable" +

+
+ +

+UnavailablePhase indicates that the object is in an unavailable state. +

+ +
+

+PodInfoExtractor + +

+ +

+ +(Appears on:OpsDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the PodInfoExtractor. +

+ +
+ +`env`
+ + +[]OpsEnvVar + + + +
+ +(Optional) + +

+Specifies a list of environment variables to be extracted from a selected Pod, +and injected into the containers executing each OpsAction. +

+ +
+ +`podSelector`
+ + +PodSelector + + + +
+ + +

+Used to select the target Pod from which environment variables and volumes are extracted from its PodSpec. +

+ +
+ +`volumeMounts`
+ + +[]Kubernetes core/v1.VolumeMount + + + +
+ +(Optional) + +

+Specifies a list of volumes, along with their respective mount points, that are to be extracted from a selected Pod, +and mounted onto the containers executing each OpsAction. +This allows the containers to access shared or persistent data necessary for the operation. +

+ +
+

+PodSelectionPolicy +(`string` alias) +

+ +

+ +(Appears on:PodSelector) + +

+
+ +

+PodSelectionPolicy defines the pod selection strategy. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"All" +

+
+ +
+ +

+"Any" +

+
+ +
+

+PodSelector + +

+ +

+ +(Appears on:PodInfoExtractor) + +

+
+ +

+PodSelector selects the target Pod from which environment variables and volumes are extracted from its PodSpec. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`role`
+ +string + + +
+ +(Optional) + +

+Specifies the role of the target Pod. +

+ +
+ +`multiPodSelectionPolicy`
+ + +PodSelectionPolicy + + + +
+ + +

+Defines the policy for selecting the target pod when multiple pods match the podSelector. +It can be either ‘Any’ (select any one pod that matches the podSelector) +or ‘All’ (select all pods that match the podSelector). +

+ +
+

+PointInTimeRefSpec + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`time`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Refers to the specific time point for restoration, with UTC as the time zone. +

+ +
+ +`ref`
+ + +RefNamespaceName + + + +
+ +(Optional) + +

+Refers to a reference source cluster that needs to be restored. +

+ +
+

+PreCheckResult + +

+ +

+ +(Appears on:OpsRequestComponentStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`pass`
+ +bool + + +
+ + +

+Indicates whether the preCheck operation passed or failed. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides explanations related to the preCheck result in a human-readable format. +

+ +
+

+PreCondition + +

+ +

+ +(Appears on:OpsDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`rule`
+ + +Rule + + + +
+ + +

+Specifies the conditions that must be met for the operation to execute. +

+ +
+

+ProgressStatus +(`string` alias) +

+ +

+ +(Appears on:ProgressStatusDetail) + +

+
+ +

+ProgressStatus defines the status of the opsRequest progress. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Failed" +

+
+ +
+ +

+"Pending" +

+
+ +
+ +

+"Processing" +

+
+ +
+ +

+"Succeed" +

+
+ +
+

+ProgressStatusDetail + +

+ +

+ +(Appears on:OpsRequestComponentStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`group`
+ +string + + +
+ +(Optional) + +

+Specifies the group to which the current object belongs. +

+ +
+ +`objectKey`
+ +string + + +
+ +(Optional) + +

+`objectKey` uniquely identifies the object, which can be any K8s object, like a Pod, Job, Component, or PVC. +Either `objectKey` or `actionName` must be provided. +

+ +
+ +`actionName`
+ +string + + +
+ +(Optional) + +

+Indicates the name of an OpsAction, as defined in `opsDefinition.spec.actions[*].name`. +Either `objectKey` or `actionName` must be provided. +

+ +
+ +`actionTasks`
+ + +[]ActionTask + + + +
+ +(Optional) + +

+Lists the tasks, such as Jobs or Pods, that carry out the action. +

+ +
+ +`status`
+ + +ProgressStatus + + + +
+ + +

+Represents the current processing state of the object, including “Processing”, “Pending”, “Failed”, “Succeed” +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a human-readable explanation of the object’s condition. +

+ +
+ +`startTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the start time of object processing. +

+ +
+ +`endTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the completion time of object processing. +

+ +
+

+RebuildInstance + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`instances`
+ + +[]Instance + + + +
+ + +

+Specifies the instances (Pods) that need to be rebuilt, typically operating as standbys. +

+ +
+ +`inPlace`
+ +bool + + +
+ + +

+When it is set to true, the instance will be rebuilt in-place. +If false, a new pod will be created. Once the new pod is ready to serve, +the instances that require rebuilding will be taken offline. +

+ +
+ +`backupName`
+ +string + + +
+ +(Optional) + +

+Indicates the name of the Backup custom resource from which to recover the instance. +Defaults to an empty PersistentVolume if unspecified. +

+ +

+Note: +- Only full physical backups are supported for multi-replica Components (e.g., ‘xtrabackup’ for MySQL). +- Logical backups (e.g., ‘mysqldump’ for MySQL) are unsupported in the current version. +

+ +
+ +`sourceBackupTargetName`
+ +string + + +
+ +(Optional) + +

+When multiple source targets exist for the backup, you must specify the source target to restore from. +

+ +
+ +`restoreEnv`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Defines container environment variables for the restore process, +merged with the ones specified in the Backup and ActionSet resources. +

+ +

+Merge priority: Restore env > Backup env > ActionSet env. +

+ +

+Purpose: Some databases require different configurations when being restored as a standby +compared to being restored as a primary. +For example, when restoring MySQL as a replica, you need to set `skip_slave_start="ON"` for 5.7 +or `skip_replica_start="ON"` for 8.0. +Allowing environment variables to be passed in makes it more convenient to control these behavioral differences +during the restore process. +

+ +
+

+Reconfigure + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+ +

+Reconfigure defines the parameters for updating a Component’s configuration. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`parameters`
+ + +[]ParameterPair + + + +
+ +(Optional) + +

+Specifies a list of key-value pairs representing parameters and their corresponding values +within a single configuration file. +This field is used to override or set the values of parameters without modifying the entire configuration file. +

+ +
+

+RefNamespaceName + +

+ +

+ +(Appears on:BackupRefSpec, PointInTimeRefSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+Refers to the specific name of the resource. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Refers to the specific namespace of the resource. +

+ +
+

+ReplicaChanger + +

+ +

+ +(Appears on:ScaleIn, ScaleOut) + +

+
+ +

+ReplicaChanger defines the parameters for changing the number of replicas. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicaChanges`
+ +int32 + + +
+ + +

+Specifies the replica changes for the component. +

+ +
+ +`instances`
+ + +[]InstanceReplicasTemplate + + + +
+ +(Optional) + +

+Modifies the desired replica count for existing InstanceTemplates. +

+ +
+

+Restore + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupName`
+ +string + + +
+ + +

+Specifies the name of the Backup custom resource. +

+ +
+ +`backupNamespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the backup custom resource. If not specified, the namespace of the opsRequest will be used. +

+ +
+ +`restorePointInTime`
+ +string + + +
+ + +

+Specifies the point in time to which the restore should be performed. +Supported time formats: +

+
    +
  • +RFC3339 format, e.g. “2023-11-25T18:52:53Z” +
  • +
  • +A human-readable date-time format, e.g. “Jul 25,2023 18:52:53 UTC+0800” +
  • +
+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Specifies a list of environment variables to be set in the container. +

+ +
+ +`volumeRestorePolicy`
+ +string + + +
+ + +

+Specifies the policy for restoring volume claims of a Component’s Pods. +It determines whether the volume claims should be restored sequentially (one by one) or in parallel (all at once). +Supported values: +

+
    +
  • +“Serial” +
  • +
  • +“Parallel” +
  • +
+ +
+ +`deferPostReadyUntilClusterRunning`
+ +bool + + +
+ + +

+Controls the timing of PostReady actions during the recovery process. +

+ +

+If false (default), PostReady actions execute when the Component reaches the “Running” state. +If true, PostReady actions are delayed until the entire Cluster is “Running,” +ensuring the cluster’s overall stability before proceeding. +

+ +

+This setting is useful for coordinating PostReady operations across the Cluster for optimal cluster conditions. +

+ +
+ +`parameters`
+ +[]github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1.ParameterPair + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema`. +

+ +
+

+Rule + +

+ +

+ +(Appears on:PreCondition) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`expression`
+ +string + + +
+ + +

+Specifies a Go template expression that determines how the operation can be executed. +The return value must be either `true` or `false`. +Available built-in objects that can be referenced in the expression include: +

+
    +
  • +`params`: Input parameters. +
  • +
  • +`cluster`: The referenced Cluster object. +
  • +
  • +`component`: The referenced Component object. +
  • +
+ +
+ +`message`
+ +string + + +
+ + +

+Specifies the error or status message reported if the `expression` does not evaluate to `true`. +

+ +
+

+ScaleIn + +

+ +

+ +(Appears on:HorizontalScaling) + +

+
+ +

+ScaleIn defines the configuration for a scale-in operation. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ReplicaChanger`
+ + +ReplicaChanger + + + +
+ + +

+ +(Members of `ReplicaChanger` are embedded into this type.) + +

+ +

+Modifies the replicas of the component and instance templates. +

+ +
+ +`onlineInstancesToOffline`
+ +[]string + + +
+ +(Optional) + +

+Specifies the instance names that need to be taken offline. +

+ +
+

+ScaleOut + +

+ +

+ +(Appears on:HorizontalScaling) + +

+
+ +

+ScaleOut defines the configuration for a scale-out operation. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ReplicaChanger`
+ + +ReplicaChanger + + + +
+ + +

+ +(Members of `ReplicaChanger` are embedded into this type.) + +

+ +

+Modifies the replicas of the component and instance templates. +

+ +
+ +`newInstances`
+ +[]github.com/apecloud/kubeblocks/apis/apps/v1.InstanceTemplate + + +
+ +(Optional) + +

+Defines the configuration for new instances added during scaling, including resource requirements, labels, annotations, etc. +New instances are created based on the provided instance templates. +

+ +
+ +`offlineInstancesToOnline`
+ +[]string + + +
+ +(Optional) + +

+Specifies the instances in the offline list to bring back online. +

+ +
+

+SpecificOpsRequest + +

+ +

+ +(Appears on:OpsRequestSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`upgrade`
+ + +Upgrade + + + +
+ +(Optional) + +

+Specifies the desired new version of the Cluster. +

+ +

+Note: This field is immutable once set. +

+ +
+ +`horizontalScaling`
+ + +[]HorizontalScaling + + + +
+ +(Optional) + +

+Lists HorizontalScaling objects, each specifying scaling requirements for a Component, +including desired replica changes, configurations for new instances, modifications for existing instances, +and take offline/online the specified instances. +

+ +
+ +`volumeExpansion`
+ + +[]VolumeExpansion + + + +
+ +(Optional) + +

+Lists VolumeExpansion objects, each specifying a component and its corresponding volumeClaimTemplates +that requires storage expansion. +

+ +
+ +`start`
+ + +[]ComponentOps + + + +
+ +(Optional) + +

+Lists Components to be started. If empty, all components will be started. +

+ +
+ +`stop`
+ + +[]ComponentOps + + + +
+ +(Optional) + +

+Lists Components to be stopped. If empty, all components will be stopped. +

+ +
+ +`restart`
+ + +[]ComponentOps + + + +
+ +(Optional) + +

+Lists Components to be restarted. +

+ +
+ +`switchover`
+ + +[]Switchover + + + +
+ +(Optional) + +

+Lists Switchover objects, each specifying a Component to perform the switchover operation. +

+ +
+ +`verticalScaling`
+ + +[]VerticalScaling + + + +
+ +(Optional) + +

+Lists VerticalScaling objects, each specifying a component and its desired compute resources for vertical scaling. +

+ +
+ +`reconfigures`
+ + +[]Reconfigure + + + +
+ +(Optional) + +

+Lists Reconfigure objects, each specifying a Component and its configuration updates. +

+ +
+ +`expose`
+ + +[]Expose + + + +
+ +(Optional) + +

+Lists Expose objects, each specifying a Component and its services to be exposed. +

+ +
+ +`backup`
+ + +Backup + + + +
+ +(Optional) + +

+Specifies the parameters to back up a Cluster. +

+ +
+ +`restore`
+ + +Restore + + + +
+ +(Optional) + +

+Specifies the parameters to restore a Cluster. +Note that this restore operation will roll back cluster services. +

+ +
+ +`rebuildFrom`
+ + +[]RebuildInstance + + + +
+ +(Optional) + +

+Specifies the parameters to rebuild some instances. +Rebuilding an instance involves restoring its data from a backup or another database replica. +The instances being rebuilt usually serve as standby in the cluster. +Hence, rebuilding instances is often also referred to as “standby reconstruction”. +

+ +
+ +`custom`
+ + +CustomOps + + + +
+ +(Optional) + +

+Specifies a custom operation defined by OpsDefinition. +

+ +
+

+Switchover + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Component as defined in the cluster.spec. +

+ +
+ +`componentObjectName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Component object. +

+ +
+ +`instanceName`
+ +string + + +
+ + +

+Specifies the instance whose role will be transferred. A typical usage is to transfer the leader role +in a consensus system. +

+ +
+ +`candidateName`
+ +string + + +
+ +(Optional) + +

+If CandidateName is specified, the role will be transferred to this instance. +The name must match one of the pods in the component. +Refer to ComponentDefinition’s Switchover lifecycle action for more details. +

+ +
+

+TypedObjectRef + +

+ +

+ +(Appears on:OpsResourceModifierAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiGroup`
+ +string + + +
+ + +

+Specifies the group for the resource being referenced. +If not specified, the referenced Kind must belong to the core API group. +For all third-party types, this is mandatory. +

+ +
+ +`kind`
+ +string + + +
+ + +

+Specifies the type of resource being referenced. +

+ +
+ +`name`
+ +string + + +
+ + +

+Indicates the name of the resource being referenced. +

+ +
+

+UpdatedParameters + +

+
+ +

+UpdatedParameters holds details about the modifications made to configuration parameters. +Example: +

+
+
+updatedParameters:
+	updatedKeys:
+	  my.cnf: '{"mysqld":{"max_connections":"100"}}'
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`addedKeys`
+ +map[string]string + + +
+ +(Optional) + +

+Maps newly added configuration files to their content. +

+ +
+ +`deletedKeys`
+ +map[string]string + + +
+ +(Optional) + +

+Lists the name of configuration files that have been deleted. +

+ +
+ +`updatedKeys`
+ +map[string]string + + +
+ +(Optional) + +

+Maps the name of configuration files to their updated content, detailing the changes made. +

+ +
+

+Upgrade + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+ +

+Upgrade defines the parameters for an upgrade operation. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`components`
+ + +[]UpgradeComponent + + + +
+ +(Optional) + +

+Lists components to be upgrade based on desired ComponentDefinition and ServiceVersion. +From the perspective of cluster API, the reasonable combinations should be: +1. (comp-def, service-ver) - upgrade to the specified service version and component definition, the user takes the responsibility to ensure that they are compatible. +2. (“”, service-ver) - upgrade to the specified service version, let the operator choose the latest compatible component definition. +3. (comp-def, “”) - upgrade to the specified component definition, let the operator choose the latest compatible service version. +4. (“”, “”) - upgrade to the latest service version and component definition, the operator will ensure the compatibility between the selected versions. +

+ +
+

+UpgradeComponent + +

+ +

+ +(Appears on:Upgrade) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`componentDefinitionName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ComponentDefinition, only exact matches are supported. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+Specifies the version of the Service expected to be provisioned by this Component. +Referring to the ServiceVersion defined by the ComponentDefinition and ComponentVersion. +And ServiceVersion in ClusterComponentSpec is optional, when no version is specified, +use the latest available version in ComponentVersion. +

+ +
+

+VerticalScaling + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+ +

+VerticalScaling refers to the process of adjusting compute resources (e.g., CPU, memory) allocated to a Component. +It defines the parameters required for the operation. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`ResourceRequirements`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ + +

+ +(Members of `ResourceRequirements` are embedded into this type.) + +

+ +

+Defines the desired compute resources of the Component’s instances. +

+ +
+ +`instances`
+ + +[]InstanceResourceTemplate + + + +
+ +(Optional) + +

+Specifies the desired compute resources of the instance template that need vertical scaling. +

+ +
+

+VolumeExpansion + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+ +

+VolumeExpansion encapsulates the parameters required for a volume expansion operation. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`volumeClaimTemplates`
+ + +[]OpsRequestVolumeClaimTemplate + + + +
+ + +

+Specifies a list of OpsRequestVolumeClaimTemplate objects, defining the volumeClaimTemplates +that are used to expand the storage and the desired storage size for each one. +

+ +
+
+ +

+ +Generated with `gen-crd-api-reference-docs` + +

\ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/references/api-reference/parameters.mdx b/docs/en/release-1_0_1/user_docs/references/api-reference/parameters.mdx new file mode 100644 index 00000000..9db568a9 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/api-reference/parameters.mdx @@ -0,0 +1,5770 @@ +--- +title: Parameters API Reference +description: Parameters API Reference +keywords: [parameters, api] +sidebar_position: 3 +sidebar_label: Parameters +--- +
+ +

+Packages: +

+ +

parameters.kubeblocks.io/v1alpha1

+Resource Types: + +

+ComponentParameter + +

+
+ +

+ComponentParameter is the Schema for the componentparameters API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`parameters.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ComponentParameter` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentParameterSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + +
+ +`clusterName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Cluster that this configuration is associated with. +

+ +
+ +`componentName`
+ +string + + +
+ + +

+Represents the name of the Component that this configuration pertains to. +

+ +
+ +`configItemDetails`
+ + +[]ConfigTemplateItemDetail + + + +
+ +(Optional) + +

+ConfigItemDetails is an array of ConfigTemplateItemDetail objects. +

+ +

+Each ConfigTemplateItemDetail corresponds to a configuration template, +which is a ConfigMap that contains multiple configuration files. +Each configuration file is stored as a key-value pair within the ConfigMap. +

+ +

+The ConfigTemplateItemDetail includes information such as: +

+
    +
  • +The configuration template (a ConfigMap) +
  • +
  • +The corresponding ConfigConstraint (constraints and validation rules for the configuration) +
  • +
  • +Volume mounts (for mounting the configuration files) +
  • +
+ +
+ +
+ +`status`
+ + +ComponentParameterStatus + + + +
+ + +
+

+ParamConfigRenderer + +

+
+ +

+ParamConfigRenderer is the Schema for the paramconfigrenderers API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`parameters.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ParamConfigRenderer` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ParamConfigRendererSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`componentDef`
+ +string + + +
+ + +

+Specifies the ComponentDefinition custom resource (CR) that defines the Component’s characteristics and behavior. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +If no version is specified, the latest available version will be used. +

+ +
+ +`parametersDefs`
+ +[]string + + +
+ +(Optional) + +

+Specifies the ParametersDefinition custom resource (CR) that defines the Component parameter’s schema and behavior. +

+ +
+ +`configs`
+ + +[]ComponentConfigDescription + + + +
+ +(Optional) + +

+Specifies the configuration files. +

+ +
+ +
+ +`status`
+ + +ParamConfigRendererStatus + + + +
+ + +
+

+Parameter + +

+
+ +

+Parameter is the Schema for the parameters API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`parameters.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Parameter` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ParameterSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + +
+ +`clusterName`
+ +string + + +
+ + +

+Specifies the name of the Cluster resource that this operation is targeting. +

+ +
+ +`componentParameters`
+ + +[]ComponentParametersSpec + + + +
+ + +

+Lists ComponentParametersSpec objects, each specifying a Component and its parameters and template updates. +

+ +
+ +
+ +`status`
+ + +ParameterStatus + + + +
+ + +
+

+ParametersDefinition + +

+
+ +

+ParametersDefinition is the Schema for the parametersdefinitions API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`parameters.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ParametersDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ParametersDefinitionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`fileName`
+ +string + + +
+ +(Optional) + +

+Specifies the config file name in the config template. +

+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Defines a list of parameters including their names, default values, descriptions, +types, and constraints (permissible values or the range of valid values). +

+ +
+ +`reloadAction`
+ + +ReloadAction + + + +
+ +(Optional) + +

+Specifies the dynamic reload (dynamic reconfiguration) actions supported by the engine. +When set, the controller executes the scripts defined in these actions to handle dynamic parameter updates. +

+ +

+Dynamic reloading is triggered only if both of the following conditions are met: +

+
    +
  1. +The modified parameters are listed in the `dynamicParameters` field. +If `dynamicParameterSelectedPolicy` is set to “all”, modifications to `staticParameters` +can also trigger a reload. +
  2. +
  3. +`reloadAction` is set. +
  4. +
+ +

+If `reloadAction` is not set or the modified parameters are not listed in `dynamicParameters`, +dynamic reloading will not be triggered. +

+ +

+Example: +

+
+
+dynamicReloadAction:
+ tplScriptTrigger:
+   namespace: kb-system
+   scriptConfigMapRef: mysql-reload-script
+   sync: true
+
+
+ +
+ +`downwardAPIChangeTriggeredActions`
+ + +[]DownwardAPIChangeTriggeredAction + + + +
+ +(Optional) + +

+Specifies a list of actions to execute specified commands based on Pod labels. +

+ +

+It utilizes the K8s Downward API to mount label information as a volume into the pod. +The ‘config-manager’ sidecar container watches for changes in the role label and dynamically invokes +registered commands (usually executing some SQL statements) when a change is detected. +

+ +

+It is designed for scenarios where: +

+
    +
  • +Replicas with different roles have different configurations, such as Redis primary & secondary replicas. +
  • +
  • +After a role switch (e.g., from secondary to primary), some changes in configuration are needed +to reflect the new role. +
  • +
+ +
+ +`deletedPolicy`
+ + +ParameterDeletedPolicy + + + +
+ +(Optional) + +

+Specifies the policy applied when a parameter is removed. +

+ +
+ +`mergeReloadAndRestart`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to consolidate dynamic reload and restart actions into a single restart. +

+
    +
  • +If true, updates requiring both actions will result in only a restart, merging the actions. +
  • +
  • +If false, updates will trigger both actions executed sequentially: first dynamic reload, then restart. +
  • +
+ +

+This flag allows for more efficient handling of configuration changes by potentially eliminating +an unnecessary reload step. +

+ +
+ +`reloadStaticParamsBeforeRestart`
+ +bool + + +
+ +(Optional) + +

+Configures whether the dynamic reload specified in `reloadAction` applies only to dynamic parameters or +to all parameters (including static parameters). +

+
    +
  • +false (default): Only modifications to the dynamic parameters listed in `dynamicParameters` +will trigger a dynamic reload. +
  • +
  • +true: Modifications to both dynamic parameters listed in `dynamicParameters` and static parameters +listed in `staticParameters` will trigger a dynamic reload. +The “all” option is for certain engines that require static parameters to be set +via SQL statements before they can take effect on restart. +
  • +
+ +
+ +`staticParameters`
+ +[]string + + +
+ +(Optional) + +

+List static parameters. +Modifications to any of these parameters require a restart of the process to take effect. +

+ +
+ +`dynamicParameters`
+ +[]string + + +
+ +(Optional) + +

+List dynamic parameters. +Modifications to these parameters trigger a configuration reload without requiring a process restart. +

+ +
+ +`immutableParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the parameters that cannot be modified once set. +Attempting to change any of these parameters will be ignored. +

+ +
+ +
+ +`status`
+ + +ParametersDefinitionStatus + + + +
+ + +
+

+AutoTrigger + +

+ +

+ +(Appears on:ReloadAction) + +

+
+ +

+AutoTrigger automatically performs the reload when specified conditions are met. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`processName`
+ +string + + +
+ +(Optional) + +

+The name of the process. +

+ +
+

+CfgFileFormat +(`string` alias) +

+ +

+ +(Appears on:FileFormatConfig) + +

+
+ +

+CfgFileFormat defines formatter of configuration files. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"dotenv" +

+
+ +
+ +

+"hcl" +

+
+ +
+ +

+"ini" +

+
+ +
+ +

+"json" +

+
+ +
+ +

+"properties" +

+
+ +
+ +

+"props-plus" +

+
+ +
+ +

+"redis" +

+
+ +
+ +

+"toml" +

+
+ +
+ +

+"xml" +

+
+ +
+ +

+"yaml" +

+
+ +
+

+ComponentConfigDescription + +

+ +

+ +(Appears on:ParamConfigRendererSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the config file name in the config template. +

+ +
+ +`templateName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the referenced componentTemplateSpec. +

+ +
+ +`fileFormatConfig`
+ + +FileFormatConfig + + + +
+ +(Optional) + +

+Specifies the format of the configuration file and any associated parameters that are specific to the chosen format. +Supported formats include `ini`, `xml`, `yaml`, `json`, `hcl`, `dotenv`, `properties`, and `toml`. +

+ +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +

+Example: +

+
+
+fileFormatConfig:
+ format: ini
+ iniConfig:
+   sectionName: mysqld
+
+
+ +
+ +`reRenderResourceTypes`
+ + +[]RerenderResourceType + + + +
+ +(Optional) + +

+Specifies whether the configuration needs to be re-rendered after v-scale or h-scale operations to reflect changes. +

+ +

+In some scenarios, the configuration may need to be updated to reflect the changes in resource allocation +or cluster topology. Examples: +

+
    +
  • +Redis: adjust maxmemory after v-scale operation. +
  • +
  • +MySQL: increase max connections after v-scale operation. +
  • +
  • +Zookeeper: update zoo.cfg with new node addresses after h-scale operation. +
  • +
+ +
+

+ComponentParameterSpec + +

+ +

+ +(Appears on:ComponentParameter) + +

+
+ +

+ComponentParameterSpec defines the desired state of ComponentConfiguration +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Cluster that this configuration is associated with. +

+ +
+ +`componentName`
+ +string + + +
+ + +

+Represents the name of the Component that this configuration pertains to. +

+ +
+ +`configItemDetails`
+ + +[]ConfigTemplateItemDetail + + + +
+ +(Optional) + +

+ConfigItemDetails is an array of ConfigTemplateItemDetail objects. +

+ +

+Each ConfigTemplateItemDetail corresponds to a configuration template, +which is a ConfigMap that contains multiple configuration files. +Each configuration file is stored as a key-value pair within the ConfigMap. +

+ +

+The ConfigTemplateItemDetail includes information such as: +

+
    +
  • +The configuration template (a ConfigMap) +
  • +
  • +The corresponding ConfigConstraint (constraints and validation rules for the configuration) +
  • +
  • +Volume mounts (for mounting the configuration files) +
  • +
+ +
+

+ComponentParameterStatus + +

+ +

+ +(Appears on:ComponentParameter) + +

+
+ +

+ComponentParameterStatus defines the observed state of ComponentConfiguration +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a description of any abnormal status. +

+ +
+ +`phase`
+ + +ParameterPhase + + + +
+ +(Optional) + +

+Indicates the current status of the configuration item. +

+ +

+Possible values include “Creating”, “Init”, “Running”, “Pending”, “Merged”, “MergeFailed”, “FailedAndPause”, +“Upgrading”, “Deleting”, “FailedAndRetry”, “Finished”. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the latest generation observed for this +ClusterDefinition. It corresponds to the ConfigConstraint’s generation, which is +updated by the API Server. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Provides detailed status information for opsRequest. +

+ +
+ +`configurationStatus`
+ + +[]ConfigTemplateItemDetailStatus + + + +
+ + +

+Provides the status of each component undergoing reconfiguration. +

+ +
+

+ComponentParameters +(`map[string]*string` alias) +

+ +

+ +(Appears on:ComponentParametersSpec) + +

+
+
+

+ComponentParametersSpec + +

+ +

+ +(Appears on:ParameterSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ +string + + +
+ + +

+Specifies the name of the Component. +

+ +
+ +`parameters`
+ + +ComponentParameters + + + +
+ +(Optional) + +

+Specifies the user-defined configuration template or parameters. +

+ +
+ +`userConfigTemplates`
+ + +map[string]github.com/apecloud/kubeblocks/apis/parameters/v1alpha1.ConfigTemplateExtension + + + +
+ +(Optional) + +

+Specifies the user-defined configuration template. +

+ +

+When provided, the `importTemplateRef` overrides the default configuration template +specified in `configSpec.templateRef`. +This allows users to customize the configuration template according to their specific requirements. +

+ +
+

+ComponentReconfiguringStatus + +

+ +

+ +(Appears on:ParameterStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ +string + + +
+ + +

+Specifies the name of the Component. +

+ +
+ +`phase`
+ + +ParameterPhase + + + +
+ +(Optional) + +

+Indicates the current status of the configuration item. +

+ +

+Possible values include “Creating”, “Init”, “Running”, “Pending”, “Merged”, “MergeFailed”, “FailedAndPause”, +“Upgrading”, “Deleting”, “FailedAndRetry”, “Finished”. +

+ +
+ +`parameterStatus`
+ + +[]ReconfiguringStatus + + + +
+ + +

+Describes the status of the component reconfiguring. +

+ +
+

+ConfigTemplateExtension + +

+ +

+ +(Appears on:ComponentParametersSpec, ConfigTemplateItemDetail, ReconfiguringStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`templateRef`
+ +string + + +
+ + +

+Specifies the name of the referenced configuration template ConfigMap object. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the referenced configuration template ConfigMap object. +An empty namespace is equivalent to the “default” namespace. +

+ +
+ +`policy`
+ + +MergedPolicy + + + +
+ +(Optional) + +

+Defines the strategy for merging externally imported templates into component templates. +

+ +
+

+ConfigTemplateItemDetail + +

+ +

+ +(Appears on:ComponentParameterSpec) + +

+
+ +

+ConfigTemplateItemDetail corresponds to settings of a configuration template (a ConfigMap). +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the unique identifier of the configuration template. +

+ +

+It must be a string of maximum 63 characters, and can only include lowercase alphanumeric characters, +hyphens, and periods. +The name must start and end with an alphanumeric character. +

+ +
+ +`payload`
+ + +Payload + + + +
+ +(Optional) + +

+External controllers can trigger a configuration rerender by modifying this field. +

+ +

+Note: Currently, the `payload` field is opaque and its content is not interpreted by the system. +Modifying this field will cause a rerender, regardless of the specific content of this field. +

+ +
+ +`configSpec`
+ +github.com/apecloud/kubeblocks/apis/apps/v1.ComponentFileTemplate + + +
+ +(Optional) + +

+Specifies the name of the configuration template (a ConfigMap), ConfigConstraint, and other miscellaneous options. +

+ +

+The configuration template is a ConfigMap that contains multiple configuration files. +Each configuration file is stored as a key-value pair within the ConfigMap. +

+ +

+ConfigConstraint allows defining constraints and validation rules for configuration parameters. +It ensures that the configuration adheres to certain requirements and limitations. +

+ +
+ +`userConfigTemplates`
+ + +ConfigTemplateExtension + + + +
+ +(Optional) + +

+Specifies the user-defined configuration template. +

+ +

+When provided, the `importTemplateRef` overrides the default configuration template +specified in `configSpec.templateRef`. +This allows users to customize the configuration template according to their specific requirements. +

+ +
+ +`configFileParams`
+ + +map[string]github.com/apecloud/kubeblocks/apis/parameters/v1alpha1.ParametersInFile + + + +
+ +(Optional) + +

+Specifies the user-defined configuration parameters. +

+ +

+When provided, the parameter values in `configFileParams` override the default configuration parameters. +This allows users to override the default configuration according to their specific needs. +

+ +
+

+ConfigTemplateItemDetailStatus + +

+ +

+ +(Appears on:ComponentParameterStatus, ReconfiguringStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the configuration template. It is a required field and must be a string of maximum 63 characters. +The name should only contain lowercase alphanumeric characters, hyphens, or periods. It should start and end with an alphanumeric character. +

+ +
+ +`phase`
+ + +ParameterPhase + + + +
+ +(Optional) + +

+Indicates the current status of the configuration item. +

+ +

+Possible values include “Creating”, “Init”, “Running”, “Pending”, “Merged”, “MergeFailed”, “FailedAndPause”, +“Upgrading”, “Deleting”, “FailedAndRetry”, “Finished”. +

+ +
+ +`lastDoneRevision`
+ +string + + +
+ +(Optional) + +

+Represents the last completed revision of the configuration item. This field is optional. +

+ +
+ +`updateRevision`
+ +string + + +
+ +(Optional) + +

+Represents the updated revision of the configuration item. This field is optional. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a description of any abnormal status. This field is optional. +

+ +
+ +`reconcileDetail`
+ + +ReconcileDetail + + + +
+ +(Optional) + +

+Provides detailed information about the execution of the configuration change. This field is optional. +

+ +
+

+DownwardAPIChangeTriggeredAction + +

+ +

+ +(Appears on:ParametersDefinitionSpec) + +

+
+ +

+DownwardAPIChangeTriggeredAction defines an action that triggers specific commands in response to changes in Pod labels. +For example, a command might be executed when the ‘role’ label of the Pod is updated. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the field. It must be a string of maximum length 63. +The name should match the regex pattern `^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$`. +

+ +
+ +`mountPoint`
+ +string + + +
+ + +

+Specifies the mount point of the Downward API volume. +

+ +
+ +`items`
+ + +[]Kubernetes core/v1.DownwardAPIVolumeFile + + + +
+ + +

+Represents a list of files under the Downward API volume. +

+ +
+ +`command`
+ +[]string + + +
+ +(Optional) + +

+Specifies the command to be triggered when changes are detected in Downward API volume files. +It relies on the inotify mechanism in the config-manager sidecar to monitor file changes. +

+ +
+ +`scriptConfig`
+ + +ScriptConfig + + + +
+ +(Optional) + +

+ScriptConfig object specifies a ConfigMap that contains script files that should be mounted inside the pod. +The scripts are mounted as volumes and can be referenced and executed by the DownwardAction to perform specific tasks or configurations. +

+ +
+

+DynamicParameterSelectedPolicy +(`string` alias) +

+
+ +

+DynamicParameterSelectedPolicy determines how to select the parameters of dynamic reload actions +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"all" +

+
+ +
+ +

+"dynamic" +

+
+ +
+

+DynamicReloadType +(`string` alias) +

+
+ +

+DynamicReloadType defines reload method. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"auto" +

+
+ +
+ +

+"http" +

+
+ +
+ +

+"sql" +

+
+ +
+ +

+"exec" +

+
+ +
+ +

+"tpl" +

+
+ +
+ +

+"signal" +

+
+ +
+

+FileFormatConfig + +

+ +

+ +(Appears on:ComponentConfigDescription) + +

+
+ +

+FileFormatConfig specifies the format of the configuration file and any associated parameters +that are specific to the chosen format. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`FormatterAction`
+ + +FormatterAction + + + +
+ + +

+ +(Members of `FormatterAction` are embedded into this type.) + +

+(Optional) + +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +
+ +`format`
+ + +CfgFileFormat + + + +
+ + +

+The config file format. Valid values are `ini`, `xml`, `yaml`, `json`, +`hcl`, `dotenv`, `properties` and `toml`. Each format has its own characteristics and use cases. +

+ + +
+

+FormatterAction + +

+ +

+ +(Appears on:FileFormatConfig) + +

+
+ +

+FormatterAction configures format-specific options for different configuration file formats. +Note: Only one of its members should be specified at any given time. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`iniConfig`
+ + +IniConfig + + + +
+ +(Optional) + +

+Holds options specific to the ‘ini’ file format. +

+ +
+

+IniConfig + +

+ +

+ +(Appears on:FormatterAction) + +

+
+ +

+IniConfig holds options specific to the ‘ini’ file format. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`sectionName`
+ +string + + +
+ +(Optional) + +

+A string that describes the name of the ini section. +

+ +
+

+MergedPolicy +(`string` alias) +

+ +

+ +(Appears on:ConfigTemplateExtension) + +

+
+ +

+MergedPolicy defines how to merge external imported templates into component templates. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"none" +

+
+ +
+ +

+"add" +

+
+ +
+ +

+"patch" +

+
+ +
+ +

+"replace" +

+
+ +
+

+ParamConfigRendererSpec + +

+ +

+ +(Appears on:ParamConfigRenderer) + +

+
+ +

+ParamConfigRendererSpec defines the desired state of ParamConfigRenderer +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentDef`
+ +string + + +
+ + +

+Specifies the ComponentDefinition custom resource (CR) that defines the Component’s characteristics and behavior. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +If no version is specified, the latest available version will be used. +

+ +
+ +`parametersDefs`
+ +[]string + + +
+ +(Optional) + +

+Specifies the ParametersDefinition custom resource (CR) that defines the Component parameter’s schema and behavior. +

+ +
+ +`configs`
+ + +[]ComponentConfigDescription + + + +
+ +(Optional) + +

+Specifies the configuration files. +

+ +
+

+ParamConfigRendererStatus + +

+ +

+ +(Appears on:ParamConfigRenderer) + +

+
+ +

+ParamConfigRendererStatus defines the observed state of ParamConfigRenderer +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+The most recent generation number of the ParamsDesc object that has been observed by the controller. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+ +`phase`
+ + +ParametersDescPhase + + + +
+ +(Optional) + +

+Specifies the status of the configuration template. +When set to PDAvailablePhase, the ParamsDesc can be referenced by ComponentDefinition. +

+ +
+

+ParameterDeletedMethod +(`string` alias) +

+ +

+ +(Appears on:ParameterDeletedPolicy) + +

+
+ +

+ParameterDeletedMethod defines how to handle parameter removal +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"RestoreToDefault" +

+
+ +
+ +

+"Reset" +

+
+ +
+

+ParameterDeletedPolicy + +

+ +

+ +(Appears on:ParametersDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`deletedMethod`
+ + +ParameterDeletedMethod + + + +
+ + +

+Specifies the method to handle the deletion of a parameter. +If set to “RestoreToDefault”, the parameter will be restored to its default value, +which requires engine support, such as pg. +If set to “Reset”, the parameter will be re-rendered through the configuration template. +

+ +
+ +`defaultValue`
+ +string + + +
+ +(Optional) + +

+Specifies the value to use if DeletedMethod is RestoreToDefault. +Example: pg +SET configuration_parameter TO DEFAULT; +

+ +
+

+ParameterPhase +(`string` alias) +

+ +

+ +(Appears on:ComponentParameterStatus, ComponentReconfiguringStatus, ConfigTemplateItemDetailStatus, ParameterStatus) + +

+
+ +

+ParameterPhase defines the Configuration FSM phase +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Creating" +

+
+ +
+ +

+"Deleting" +

+
+ +
+ +

+"FailedAndPause" +

+
+ +
+ +

+"FailedAndRetry" +

+
+ +
+ +

+"Finished" +

+
+ +
+ +

+"Init" +

+
+ +
+ +

+"MergeFailed" +

+
+ +
+ +

+"Merged" +

+
+ +
+ +

+"Pending" +

+
+ +
+ +

+"Running" +

+
+ +
+ +

+"Upgrading" +

+
+ +
+

+ParameterSpec + +

+ +

+ +(Appears on:Parameter) + +

+
+ +

+ParameterSpec defines the desired state of Parameter +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterName`
+ +string + + +
+ + +

+Specifies the name of the Cluster resource that this operation is targeting. +

+ +
+ +`componentParameters`
+ + +[]ComponentParametersSpec + + + +
+ + +

+Lists ComponentParametersSpec objects, each specifying a Component and its parameters and template updates. +

+ +
+

+ParameterStatus + +

+ +

+ +(Appears on:Parameter) + +

+
+ +

+ParameterStatus defines the observed state of Parameter +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a description of any abnormal status. +

+ +
+ +`phase`
+ + +ParameterPhase + + + +
+ +(Optional) + +

+Indicates the current status of the configuration item. +

+ +

+Possible values include “Creating”, “Init”, “Running”, “Pending”, “Merged”, “MergeFailed”, “FailedAndPause”, +“Upgrading”, “Deleting”, “FailedAndRetry”, “Finished”. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the latest generation observed for this +ClusterDefinition. It corresponds to the ConfigConstraint’s generation, which is +updated by the API Server. +

+ +
+ +`componentReconfiguringStatus`
+ + +[]ComponentReconfiguringStatus + + + +
+ +(Optional) + +

+Records the status of a reconfiguring operation if `opsRequest.spec.type` equals to “Reconfiguring”. +

+ +
+

+ParametersDefinitionSpec + +

+ +

+ +(Appears on:ParametersDefinition) + +

+
+ +

+ParametersDefinitionSpec defines the desired state of ParametersDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`fileName`
+ +string + + +
+ +(Optional) + +

+Specifies the config file name in the config template. +

+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Defines a list of parameters including their names, default values, descriptions, +types, and constraints (permissible values or the range of valid values). +

+ +
+ +`reloadAction`
+ + +ReloadAction + + + +
+ +(Optional) + +

+Specifies the dynamic reload (dynamic reconfiguration) actions supported by the engine. +When set, the controller executes the scripts defined in these actions to handle dynamic parameter updates. +

+ +

+Dynamic reloading is triggered only if both of the following conditions are met: +

+
    +
  1. +The modified parameters are listed in the `dynamicParameters` field. +If `dynamicParameterSelectedPolicy` is set to “all”, modifications to `staticParameters` +can also trigger a reload. +
  2. +
  3. +`reloadAction` is set. +
  4. +
+ +

+If `reloadAction` is not set or the modified parameters are not listed in `dynamicParameters`, +dynamic reloading will not be triggered. +

+ +

+Example: +

+
+
+reloadAction:
+ tplScriptTrigger:
+   namespace: kb-system
+   scriptConfigMapRef: mysql-reload-script
+   sync: true
+
+
+ +
+ +`downwardAPIChangeTriggeredActions`
+ + +[]DownwardAPIChangeTriggeredAction + + + +
+ +(Optional) + +

+Specifies a list of actions to execute specified commands based on Pod labels. +

+ +

+It utilizes the K8s Downward API to mount label information as a volume into the pod. +The ‘config-manager’ sidecar container watches for changes in the role label and dynamically invokes +registered commands (usually executing some SQL statements) when a change is detected. +

+ +

+It is designed for scenarios where: +

+
    +
  • +Replicas with different roles have different configurations, such as Redis primary & secondary replicas. +
  • +
  • +After a role switch (e.g., from secondary to primary), some changes in configuration are needed +to reflect the new role. +
  • +
+ +
+ +`deletedPolicy`
+ + +ParameterDeletedPolicy + + + +
+ +(Optional) + +

+Specifies the policy to apply when a parameter is removed. +

+ +
+ +`mergeReloadAndRestart`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to consolidate dynamic reload and restart actions into a single restart. +

+
    +
  • +If true, updates requiring both actions will result in only a restart, merging the actions. +
  • +
  • +If false, updates will trigger both actions executed sequentially: first dynamic reload, then restart. +
  • +
+ +

+This flag allows for more efficient handling of configuration changes by potentially eliminating +an unnecessary reload step. +

+ +
+ +`reloadStaticParamsBeforeRestart`
+ +bool + + +
+ +(Optional) + +

+Configures whether the dynamic reload specified in `reloadAction` applies only to dynamic parameters or +to all parameters (including static parameters). +

+
    +
  • +false (default): Only modifications to the dynamic parameters listed in `dynamicParameters` +will trigger a dynamic reload. +
  • +
  • +true: Modifications to both dynamic parameters listed in `dynamicParameters` and static parameters +listed in `staticParameters` will trigger a dynamic reload. +The “all” option is for certain engines that require static parameters to be set +via SQL statements before they can take effect on restart. +
  • +
+ +
+ +`staticParameters`
+ +[]string + + +
+ +(Optional) + +

+List static parameters. +Modifications to any of these parameters require a restart of the process to take effect. +

+ +
+ +`dynamicParameters`
+ +[]string + + +
+ +(Optional) + +

+List dynamic parameters. +Modifications to these parameters trigger a configuration reload without requiring a process restart. +

+ +
+ +`immutableParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the parameters that cannot be modified once set. +Attempts to change any of these parameters will be ignored. +

+ +
+

+ParametersDefinitionStatus + +

+ +

+ +(Appears on:ParametersDefinition) + +

+
+ +

+ParametersDefinitionStatus defines the observed state of ParametersDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+The most recent generation number of the ParamsDesc object that has been observed by the controller. +

+ +
+ +`phase`
+ + +ParametersDescPhase + + + +
+ +(Optional) + +

+Specifies the status of the configuration template. +When set to PDAvailablePhase, the ParamsDesc can be referenced by ComponentDefinition. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents a list of detailed status of the ParametersDescription object. +

+ +

+This field is crucial for administrators and developers to monitor and respond to changes within the ParametersDescription. +It provides a history of state transitions and a snapshot of the current state that can be used for +automated logic or direct inspection. +

+ +
+

+ParametersDescPhase +(`string` alias) +

+ +

+ +(Appears on:ParamConfigRendererStatus, ParametersDefinitionStatus) + +

+
+ +

+ParametersDescPhase defines the ParametersDescription CR .status.phase +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +
+ +

+"Deleting" +

+
+ +
+ +

+"Unavailable" +

+
+ +
+

+ParametersInFile + +

+ +

+ +(Appears on:ConfigTemplateItemDetail, ReconfiguringStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`content`
+ +string + + +
+ +(Optional) + +

+Holds the configuration keys and values. This field is a workaround for issues found in kubebuilder and code-generator. +Refer to https://github.com/kubernetes-sigs/kubebuilder/issues/528 and https://github.com/kubernetes/code-generator/issues/50 for more details. +

+ +

+Represents the content of the configuration file. +

+ +
+ +`parameters`
+ +map[string]*string + + +
+ +(Optional) + +

+Represents the updated parameters for a single configuration file. +

+ +
+

+ParametersSchema + +

+ +

+ +(Appears on:ParametersDefinitionSpec) + +

+
+ +

+ParametersSchema Defines a list of configuration items with their names, default values, descriptions, +types, and constraints. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`topLevelKey`
+ +string + + +
+ +(Optional) + +

+Specifies the top-level key in the ‘configSchema.cue’ that organizes the validation rules for parameters. +This key must exist within the CUE script defined in ‘configSchema.cue’. +

+ +
+ +`cue`
+ +string + + +
+ +(Optional) + +

+Holds a string containing a script written in the CUE language that defines a list of configuration items. +Each item is detailed with its name, default value, description, type (e.g. string, integer, float), +and constraints (permissible values or the valid range of values). +

+ +

+CUE (Configure, Unify, Execute) is a declarative language designed for defining and validating +complex data configurations. +It is particularly useful in environments like K8s where complex configurations and validation rules are common. +

+ +

+This script functions as a validator for user-provided configurations, ensuring compliance with +the established specifications and constraints. +

+ +
+ +`schemaInJSON`
+ + +Kubernetes api extensions v1.JSONSchemaProps + + + +
+ + +

+Generated from the ‘cue’ field and transformed into a JSON format. +

+ +
+

+Payload +(`map[string]encoding/json.RawMessage` alias) +

+ +

+ +(Appears on:ConfigTemplateItemDetail) + +

+
+ +

+Payload holds the payload data. This field is optional and can contain any type of data. +Not included in the JSON representation of the object. +

+
+

+ReconcileDetail + +

+ +

+ +(Appears on:ConfigTemplateItemDetailStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`policy`
+ +string + + +
+ +(Optional) + +

+Represents the policy applied during the most recent execution. +

+ +
+ +`execResult`
+ +string + + +
+ +(Optional) + +

+Represents the outcome of the most recent execution. +

+ +
+ +`currentRevision`
+ +string + + +
+ +(Optional) + +

+Represents the current revision of the configuration item. +

+ +
+ +`succeedCount`
+ +int32 + + +
+ +(Optional) + +

+Represents the number of pods where configuration changes were successfully applied. +

+ +
+ +`expectedCount`
+ +int32 + + +
+ +(Optional) + +

+Represents the total number of pods that require execution of configuration changes. +

+ +
+ +`errMessage`
+ +string + + +
+ +(Optional) + +

+Represents the error message generated when the execution of configuration changes fails. +

+ +
+

+ReconfiguringStatus + +

+ +

+ +(Appears on:ComponentReconfiguringStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ConfigTemplateItemDetailStatus`
+ + +ConfigTemplateItemDetailStatus + + + +
+ + +

+ +(Members of `ConfigTemplateItemDetailStatus` are embedded into this type.) + +

+ +
+ +`updatedParameters`
+ + +map[string]github.com/apecloud/kubeblocks/apis/parameters/v1alpha1.ParametersInFile + + + +
+ +(Optional) + +

+Contains the updated parameters. +

+ +
+ +`userConfigTemplates`
+ + +ConfigTemplateExtension + + + +
+ +(Optional) + +

+Specifies the user-defined configuration template. +

+ +

+When provided, the `importTemplateRef` overrides the default configuration template +specified in `configSpec.templateRef`. +This allows users to customize the configuration template according to their specific requirements. +

+ +
+

+ReloadAction + +

+ +

+ +(Appears on:ParametersDefinitionSpec) + +

+
+ +

+ReloadAction defines the mechanisms available for dynamically reloading a process within K8s without requiring a restart. +

+ +

+Only one of the mechanisms can be specified at a time. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`unixSignalTrigger`
+ + +UnixSignalTrigger + + + +
+ +(Optional) + +

+Used to trigger a reload by sending a specific Unix signal to the process. +

+ +
+ +`shellTrigger`
+ + +ShellTrigger + + + +
+ +(Optional) + +

+Allows executing a custom shell script to reload the process. +

+ +
+ +`tplScriptTrigger`
+ + +TPLScriptTrigger + + + +
+ +(Optional) + +

+Enables reloading process using a Go template script. +

+ +
+ +`autoTrigger`
+ + +AutoTrigger + + + +
+ +(Optional) + +

+Automatically perform the reload when specified conditions are met. +

+ +
+ +`targetPodSelector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ +(Optional) + +

+Used to match labels on the pod to determine whether a dynamic reload should be performed. +

+ +

+In some scenarios, only specific pods (e.g., primary replicas) need to undergo a dynamic reload. +The `reloadedPodSelector` allows you to specify label selectors to target the desired pods for the reload process. +

+ +

+If the `reloadedPodSelector` is not specified or is nil, all pods managed by the workload will be considered for the dynamic +reload. +

+ +
+

+ReloadPolicy +(`string` alias) +

+
+ +

+ReloadPolicy defines the policy of reconfiguring. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"asyncReload" +

+
+ +
+ +

+"dynamicReloadBeginRestart" +

+
+ +
+ +

+"none" +

+
+ +
+ +

+"restartContainer" +

+
+ +
+ +

+"restart" +

+
+ +
+ +

+"rolling" +

+
+ +
+ +

+"syncReload" +

+
+ +
+

+RerenderResourceType +(`string` alias) +

+ +

+ +(Appears on:ComponentConfigDescription) + +

+
+ +

+RerenderResourceType defines the types of resource changes (e.g., vertical scaling, horizontal scaling, TLS) that trigger re-rendering of a component’s configuration. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"hscale" +

+
+ +
+ +

+"tls" +

+
+ +
+ +

+"vscale" +

+
+ +
+ +

+"shardingHScale" +

+
+ +
+

+ScriptConfig + +

+ +

+ +(Appears on:DownwardAPIChangeTriggeredAction, ShellTrigger, TPLScriptTrigger) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`scriptConfigMapRef`
+ +string + + +
+ + +

+Specifies the reference to the ConfigMap containing the scripts. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace for the ConfigMap. +If not specified, it defaults to the “default” namespace. +

+ +
+

+ShellTrigger + +

+ +

+ +(Appears on:ReloadAction) + +

+
+ +

+ShellTrigger allows executing a custom shell script to reload the process. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`command`
+ +[]string + + +
+ + +

+Specifies the command to execute in order to reload the process. It should be a valid shell command. +

+ +
+ +`sync`
+ +bool + + +
+ +(Optional) + +

+Determines the synchronization mode of parameter updates with “config-manager”. +

+
    +
  • +‘True’: Executes reload actions synchronously, pausing until completion. +
  • +
  • +‘False’: Executes reload actions asynchronously, without waiting for completion. +
  • +
+ +
+ +`batchReload`
+ +bool + + +
+ +(Optional) + +

+Controls whether parameter updates are processed individually or collectively in a batch: +

+
    +
  • +‘True’: Processes all changes in one batch reload. +
  • +
  • +‘False’: Processes each change individually. +
  • +
+ +

+Defaults to ‘False’ if unspecified. +

+ +
+ +`batchParamsFormatterTemplate`
+ +string + + +
+ +(Optional) + +

+Specifies a Go template string for formatting batch input data. +It’s used when `batchReload` is ‘True’ to format data passed into STDIN of the script. +The template accesses key-value pairs of updated parameters via the ‘$’ variable. +This allows for custom formatting of the input data. +

+ +

+Example template: +

+
+
+batchParamsFormatterTemplate: |-
+{{- range $pKey, $pValue := $ }}
+{{ printf "%s:%s" $pKey $pValue }}
+{{- end }}
+
+
+ +

+This example generates batch input data in a key:value format, sorted by keys. +

+
+
+key1:value1
+key2:value2
+key3:value3
+
+
+ +

+If not specified, the default format is key=value, sorted by keys, for each updated parameter. +

+
+
+key1=value1
+key2=value2
+key3=value3
+
+
+ +
+ +`toolsSetup`
+ + +ToolsSetup + + + +
+ +(Optional) + +

+Specifies the tools container image used by ShellTrigger for dynamic reload. +If the dynamic reload action is triggered by a ShellTrigger, this field is required. +This image must contain all necessary tools for executing the ShellTrigger scripts. +

+ +

+Usually the specified image is referenced by the init container, +which is then responsible for copying the tools from the image to a bin volume. +This ensures that the tools are available to the ‘config-manager’ sidecar. +

+ +
+ +`scriptConfig`
+ + +ScriptConfig + + + +
+ +(Optional) + +

+ScriptConfig object specifies a ConfigMap that contains script files that should be mounted inside the pod. +The scripts are mounted as volumes and can be referenced and executed by the dynamic reload. +

+ +
+

+SignalType +(`string` alias) +

+ +

+ +(Appears on:UnixSignalTrigger) + +

+
+ +

+SignalType defines which signals are valid. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"SIGABRT" +

+
+ +
+ +

+"SIGALRM" +

+
+ +
+ +

+"SIGBUS" +

+
+ +
+ +

+"SIGCHLD" +

+
+ +
+ +

+"SIGCONT" +

+
+ +
+ +

+"SIGFPE" +

+
+ +
+ +

+"SIGHUP" +

+
+ +
+ +

+"SIGILL" +

+
+ +
+ +

+"SIGINT" +

+
+ +
+ +

+"SIGIO" +

+
+ +
+ +

+"SIGKILL" +

+
+ +
+ +

+"SIGPIPE" +

+
+ +
+ +

+"SIGPROF" +

+
+ +
+ +

+"SIGPWR" +

+
+ +
+ +

+"SIGQUIT" +

+
+ +
+ +

+"SIGSEGV" +

+
+ +
+ +

+"SIGSTKFLT" +

+
+ +
+ +

+"SIGSTOP" +

+
+ +
+ +

+"SIGSYS" +

+
+ +
+ +

+"SIGTERM" +

+
+ +
+ +

+"SIGTRAP" +

+
+ +
+ +

+"SIGTSTP" +

+
+ +
+ +

+"SIGTTIN" +

+
+ +
+ +

+"SIGTTOU" +

+
+ +
+ +

+"SIGURG" +

+
+ +
+ +

+"SIGUSR1" +

+
+ +
+ +

+"SIGUSR2" +

+
+ +
+ +

+"SIGVTALRM" +

+
+ +
+ +

+"SIGWINCH" +

+
+ +
+ +

+"SIGXCPU" +

+
+ +
+ +

+"SIGXFSZ" +

+
+ +
+

+TPLScriptTrigger + +

+ +

+ +(Appears on:ReloadAction) + +

+
+ +

+TPLScriptTrigger Enables reloading process using a Go template script. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ScriptConfig`
+ + +ScriptConfig + + + +
+ + +

+ +(Members of `ScriptConfig` are embedded into this type.) + +

+ +

+Specifies the ConfigMap that contains the script to be executed for reload. +

+ +
+ +`sync`
+ +bool + + +
+ +(Optional) + +

+Determines whether parameter updates should be synchronized with the “config-manager”. +Specifies the controller’s reload strategy: +

+
    +
  • +If set to ‘True’, the controller executes the reload action in synchronous mode, +pausing execution until the reload completes. +
  • +
  • +If set to ‘False’, the controller executes the reload action in asynchronous mode, +updating the ConfigMap without waiting for the reload process to finish. +
  • +
+ +
+

+ToolConfig + +

+ +

+ +(Appears on:ToolsSetup) + +

+
+ +

+ToolConfig specifies the settings of an init container that prepare tools for dynamic reload. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the init container. +

+ +
+ +`asContainerImage`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the tool image should be used as the container image for a sidecar. +This is useful for large tool images, such as those for C++ tools, which may depend on +numerous libraries (e.g., *.so files). +

+ +

+If enabled, the tool image is deployed as a sidecar container image. +

+ +

+Examples: +

+
+
+ toolsSetup:
+   mountPoint: /kb_tools
+   toolConfigs:
+     - name: kb-tools
+       asContainerImage: true
+       image:  apecloud/oceanbase:4.2.0.0-100010032023083021
+
+
+ +

+generated containers: +

+
+
+initContainers:
+ - name: install-config-manager-tool
+   image: apecloud/kubeblocks-tools:${version}
+   command:
+   - cp
+   - /bin/config_render
+   - /opt/tools
+   volumemounts:
+   - name: kb-tools
+     mountpath: /opt/tools
+containers:
+ - name: config-manager
+   image: apecloud/oceanbase:4.2.0.0-100010032023083021
+   imagePullPolicy: IfNotPresent
+	  command:
+   - /opt/tools/reloader
+   - --log-level
+   - info
+   - --operator-update-enable
+   - --tcp
+   - "9901"
+   - --config
+   - /opt/config-manager/config-manager.yaml
+   volumemounts:
+   - name: kb-tools
+     mountpath: /opt/tools
+
+
+ +
+ +`image`
+ +string + + +
+ +(Optional) + +

+Specifies the tool container image. +

+ +
+ +`command`
+ +[]string + + +
+ +(Optional) + +

+Specifies the command to be executed by the init container. +

+ +
+

+ToolsSetup + +

+ +

+ +(Appears on:ShellTrigger) + +

+
+ +

+ToolsSetup prepares the tools for dynamic reloads used in ShellTrigger from a specified container image. +

+ +

+Example: +

+
+
+
+toolsSetup:
+	 mountPoint: /kb_tools
+	 toolConfigs:
+	   - name: kb-tools
+	     command:
+	       - cp
+	       - /bin/ob-tools
+	       - /kb_tools/obtools
+	     image: docker.io/apecloud/obtools
+
+
+ +

+This example copies the “/bin/ob-tools” binary from the image to “/kb_tools/obtools”. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`mountPoint`
+ +string + + +
+ + +

+Specifies the directory path in the container where the tools-related files are to be copied. +This field is typically used with an emptyDir volume to ensure a temporary, empty directory is provided at pod creation. +

+ +
+ +`toolConfigs`
+ + +[]ToolConfig + + + +
+ +(Optional) + +

+Specifies a list of settings of init containers that prepare tools for dynamic reload. +

+ +
+

+UnixSignalTrigger + +

+ +

+ +(Appears on:ReloadAction) + +

+
+ +

+UnixSignalTrigger is used to trigger a reload by sending a specific Unix signal to the process. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`signal`
+ + +SignalType + + + +
+ + +

+Specifies a valid Unix signal to be sent. +For a comprehensive list of all Unix signals, see: ../../pkg/configuration/configmap/handler.go:allUnixSignals +

+ +
+ +`processName`
+ +string + + +
+ + +

+Identifies the name of the process to which the Unix signal will be sent. +

+ +
+
+ +

+ +Generated with `gen-crd-api-reference-docs` + +

\ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/references/install-addons.mdx b/docs/en/release-1_0_1/user_docs/references/install-addons.mdx new file mode 100644 index 00000000..8b5ac19a --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/install-addons.mdx @@ -0,0 +1,327 @@ +--- +title: Install Addons +description: Install KubeBlocks addons with Helm +keywords: [addon, helm, KubeBlocks, kubernetes, operator, database] +sidebar_position: 3 +sidebar_label: Install Addons +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Install Addons + +With the release of KubeBlocks v0.8.0, Addons are decoupled from KubeBlocks and some Addons are not installed by default. If you want to use these Addons, install Addons first by index. Or if you uninstalled some Addons, you can follow the steps in this tutorial to install them again. + +This tutorial takes elasticsearch as an example. You can replace elasticsearch with the Addon you need. + +The official index repo is [KubeBlocks index](https://github.com/apecloud/block-index). Addons are maintained in the [KubeBlocks Addon repo](https://github.com/apecloud/kubeblocks-addons). + +:::note + +Make sure the major version of Addons and KubeBlocks are the same. + +For example, you can install an Addon v0.9.0 with KubeBlocks v0.9.2, but using mismatched major versions, such as an Addon v0.8.0 with KubeBlocks v0.9.2, may lead to errors. + +::: + + + + + +1. (Optional) Add the KubeBlocks repo. If you install KubeBlocks with Helm, just run `helm repo update`. + + ```bash + helm repo add kubeblocks https://apecloud.github.io/helm-charts + helm repo update + ``` + +2. View the Addon versions. + + ```bash + helm search repo kubeblocks/elasticsearch --versions + ``` + + Expected output: + ```bash + NAME CHART VERSION APP VERSION DESCRIPTION + kubeblocks/elasticsearch 1.0.0 8.8.2 Elasticsearch is a distributed, RESTful search ... 
+ kubeblocks/elasticsearch 0.9.1 8.8.2 Elasticsearch is a distributed, RESTful search ... + kubeblocks/elasticsearch 0.9.0 8.8.2 Elasticsearch is a distributed, RESTful search ... + kubeblocks/elasticsearch 0.8.0 8.8.2 Elasticsearch is a distributed, RESTful search ... + ``` + +3. Install the Addon (take elasticsearch as example). Specify a version with `--version`. + + ```bash + helm install kb-addon-elasticsearch kubeblocks/elasticsearch --namespace kb-system --create-namespace --version 1.0.0 + ``` + + :::note + + **Version Compatibility** + + - Always choose the Addon version compatible with your KubeBlocks deployment + - Existing deployments require version matching: + - KubeBlocks v1.0.0 → Addon v1.0.x + - KubeBlocks v0.9.x → Addon v0.9.x + - Mismatches may cause operational issues + ::: + +4. Verify whether this Addon is installed. + + The STATUS is `deployed` and this Addon is installed successfully. + + ```bash + helm list -A + > + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + ... + kb-addon-elasticsearch kb-system 1 2025-07-17 02:31:20.687010511 +0000 UTC deployed elasticsearch-1.0.0 8.8.2 + ``` + +5. (Optional) You can run the command below to uninstall the Addon. + + If you have created a related cluster, delete the cluster first. + + ```bash + helm uninstall kb-addon-elasticsearch --namespace kb-system + ``` + + + + + +1. View the index. + + kbcli creates an index named `kubeblocks` by default and you can check whether this index is created by running `kbcli addon index list`. + + ```bash + kbcli addon index list + > + INDEX URL + kubeblocks https://github.com/apecloud/block-index.git + ``` + + If the list is empty or you want to add your index, you can add the index manually by using `kbcli addon index add `. For example, + + ```bash + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + ``` + + If you are not sure whether the kubeblocks index is the latest version, you can update it. 
+ + ```bash + kbcli addon index update kubeblocks + ``` + +2. (Optional) Search whether the Addon exists in the index. + + ```bash + kbcli addon search elasticsearch + > + ADDON VERSION INDEX + elasticsearch 0.9.0 kubeblocks + elasticsearch 0.9.1 kubeblocks + elasticsearch 1.0.0 kubeblocks + ``` + +3. Install the Addon. + + If there are multiple index sources and versions for an Addon, you can specify an index by `--index` and a version by `--version`. The system installs the latest version in the `kubeblocks` index by default. + + ```bash + kbcli addon install elasticsearch --index kubeblocks --version 1.0.0 + ``` + :::note + + **Version Compatibility** + + - Always choose the Addon version compatible with your KubeBlocks deployment + - Existing deployments require version matching: + - KubeBlocks v1.0.0 → Addon v1.0.x + - KubeBlocks v0.9.x → Addon v0.9.x + - Mismatches may cause operational issues + ::: + +4. Verify whether this Addon is installed. + + The STATUS is `Enabled` and this Addon is installed successfully. + + ```bash + kbcli addon list + > + NAME VERSION PROVIDER STATUS AUTO-INSTALL + elasticsearch 1.0.0 apecloud Enabled true + ``` + + And when you check the helm list, you will see the helm chart is installed. + + ```bash + helm list -A + > + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + ... + kb-addon-elasticsearch kb-system 1 2025-07-17 02:31:20.687010511 +0000 UTC deployed elasticsearch-1.0.0 8.8.2 + ``` + +5. Disable/Enable Addons + + * Enable an Addon. + Enable an Addon will install the helm chart associated with the Addon. + + ```bash + kbcli addon enable elasticsearch + ``` + + * Disable an Addon. + Disable an Addon will uninstall the helm chart associated with the Addon. + + ```bash + kbcli addon disable elasticsearch + ``` + +6. Uninstall an Addon. + + Uninstall an Addon will delete the Addon and all related resources. 
+ + ```bash + kbcli addon uninstall elasticsearch + ``` + +:::tip + +What is the difference between using `kbcli` and `helm` to install an Addon? + +- `kbcli` installs an Addon CR, and KubeBlocks will reconcile the Addon CR to install/uninstall/upgrade the helm chart specified in the Addon CR. +- `helm` installs a helm chart directly, and you need to manage the helm chart manually. + +::: + + + + + +## Advanced Configuration + +### Set Addon Values + +You can customize the Addon values when installing or enabling an Addon, for example, set the image registry. + +:::tip + +Each Addon has its own values, one should check the full list of values first before making any changes. + +::: + +**Step 1. Check the full list of values can be set:** + +```bash +helm show values kubeblocks/elasticsearch --version 1.0.0 +``` +Where `kubeblocks` is the name of the addon helm repository, `elasticsearch` is the name of the chart, and `1.0.0` is the version of the chart. + +Or you can go to [KubeBlocks Addon repository](https://github.com/apecloud/kubeblocks-addons/blob/main/addons/elasticsearch/) and choose the right version to check more details. + +Expected output: +```yaml +image: + registry: docker.io + repository: apecloud/elasticsearch + exporter: + repository: apecloud/elasticsearch-exporter + tag: "v1.7.0" + plugin: + repository: apecloud/elasticsearch-plugins + tag: "8.8.2" +... # more values omitted for brevity +``` + +**Step 2. Update image registry to your private registry:** + + + + ```bash + helm upgrade -i kb-addon-elasticsearch kubeblocks/elasticsearch --namespace kb-system --version 1.0.0 --set image.registry=test.io # more values omitted for brevity + ``` + + + ```bash + kbcli addon enable kb-addon-elasticsearch --set image.registry=test.io # more values omitted for brevity + ``` + + + +**Step 3. Verify the changes.** + +* Check the helm chart values. 
+ + ```bash + helm get values kb-addon-elasticsearch -n kb-system + ``` + + Expected output: + ```yaml + image: + registry: test.io + .. # more values omitted for brevity + ``` + Check if all values are updated as expected. + +* Check `ComponentVersion` CR if all the images are updated. + + Each KubeBlocks Addon has its own `ComponentVersion` CR to record the list of service versions and the list of images use for each version. + + ```bash + kubectl get cmpv elasticsearch -oyaml | yq '.spec.releases[].images' + ``` + + Expected output: + ```bash + elasticsearch: test.io/apecloud/elasticsearch:7.7.1 # using test.io instead of docker.io + ``` + + All images are using the private registry `test.io` as expected. + +* Check the `ComponentDefinition` CR if all the images are updated. + + ```bash + # for each ComponentDefinition CR, check the images field + kubectl get cmpd -l app.kubernetes.io/instance=kb-addon-elasticsearch -oyaml | yq '.items[].spec' | grep 'image:' + ``` + +* Check the `ComponentDefinition` Status after the changes. + + ```bash + # check the status of the ComponentDefinition CRs + kubectl get cmpd -l app.kubernetes.io/instance=kb-addon-elasticsearch + ``` + + Expected output: + ```bash + NAME SERVICE SERVICE-VERSION STATUS AGE + elasticsearch-7-1.0.0 elasticsearch Available 33m + elasticsearch-8-1.0.0 elasticsearch Available 33m + ``` + + If the status is `Unavailable`, you can describe the `ComponentDefinition` CR to see the error message. + + ```bash + kubectl describe cmpd -l app.kubernetes.io/instance=kb-addon-elasticsearch + ``` + + If the message is `immutable fields can't be updated` as below, + ```text + Status: + Message: immutable fields can't be updated + Observed Generation: 3 + Phase: Unavailable + ``` + + You need to annotate the `ComponentDefinition` CR to allow the changes. 
+ + ```bash + kubectl annotate cmpd -l app.kubernetes.io/instance=kb-addon-elasticsearch apps.kubeblocks.io/skip-immutable-check=true + ``` + + Then the status will be `Available` again. diff --git a/docs/en/release-1_0_1/user_docs/references/install-kbcli.mdx b/docs/en/release-1_0_1/user_docs/references/install-kbcli.mdx new file mode 100644 index 00000000..97c91faf --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/install-kbcli.mdx @@ -0,0 +1,285 @@ +--- +title: Install and Configure the KubeBlocks CLI (kbcli) +description: Complete guide to installing, verifying, and configuring kbcli for KubeBlocks cluster management +keywords: [kbcli, KubeBlocks CLI, installation, configuration, command-line] +sidebar_position: 4 +sidebar_label: Install kbcli +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# KubeBlocks Command Line (kbcli) + +kbcli is the official command-line tool for managing KubeBlocks clusters. It provides: +- Cluster lifecycle management (create, scale, delete) +- Configuration and troubleshooting tools +- Version compatibility checks +- Shell auto-completion support + +## Prerequisites + +Before installing kbcli, ensure your system meets these requirements: + +- **All platforms**: + - Network access to download packages + - Administrator/sudo privileges +- **Windows**: + - PowerShell 5.0 or later +- **macOS/Linux**: + - curl or wget installed + - Homebrew (for macOS brew installation) + +## Install kbcli + +**Supported Platforms** + +kbcli is available for: +- **macOS** (Intel and Apple Silicon) +- **Windows** (x86-64) +- **Linux** (x86-64 and ARM64) + + + + +Choose your preferred installation method: + +- **curl** (recommended for most users) +- **Homebrew** (macOS package manager) + +**Option 1: Install with curl** + +To install the latest stable version: + +```bash +curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash +``` + +To install a specific version: + +1. 
Check the available versions in [kbcli Release](https://github.com/apecloud/kbcli/releases/). +2. Specify a version with `-s` and run the command below. + + ```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s + ``` + +:::note +**Version Compatibility** + +- Always match kbcli version with your KubeBlocks deployment +- Latest stable is recommended for new installations +- Existing deployments require version matching: + - KubeBlocks v1.0.0 → kbcli v1.0.x + - KubeBlocks v0.9.x → kbcli v0.9.x +- Mismatches may cause operational issues +::: + +2. Run `kbcli version` to check the version of kbcli and ensure that it is successfully installed. + +:::tip +**Troubleshooting** +If installation fails: +1. Verify network connectivity +2. Check firewall/proxy settings +::: + +**Option 2: Install with Homebrew** + +1. Install ApeCloud tap, the Homebrew package of ApeCloud. + + ```bash + brew tap apecloud/tap + ``` + +2. Install kbcli. + + ```bash + brew install kbcli + ``` + + If you want to install kbcli with a specified version, run the commands below. + + ```bash + # View the available version + brew search kbcli + + # Specify a version + brew install kbcli@ + ``` + +3. Verify that kbcli is successfully installed. + + ```bash + kbcli -h + ``` + + + + + +Choose your preferred installation method: + +**Option 1: Script Installation (Recommended)** + +:::note + +By default, the script will be installed at C:\Program Files\kbcli-windows-amd64 and cannot be modified. + +If you need to customize the installation path, use the zip file. + +::: + +1. Run PowerShell as an **administrator** and execute `Set-ExecutionPolicy Unrestricted`. +2. Install kbcli. + + The following script will automatically install the environment variables at C:\Program Files\kbcli-windows-amd64. 
+ + ```bash + powershell -Command " & ([scriptblock]::Create((iwr https://www.kubeblocks.io/installer/install_cli.ps1)))" + ``` + + To install a specified version of kbcli, use `-v` after the command and describe the version you want to install. + + ```bash + powershell -Command " & ([scriptblock]::Create((iwr https://www.kubeblocks.io/installer/install_cli.ps1))) -v 0.5.2" + ``` + +**Option 2: Manual Installation** + +1. Download the kbcli installation zip package from [kbcli Release](https://github.com/apecloud/kbcli/releases/). +2. Extract the file and add it to the environment variables. + 1. Click the Windows icon and select **System Settings**. + 2. Click **Settings** -> **Related Settings** -> **Advanced system settings**. + 3. Click **Environment Variables** on the **Advanced** tab. + 4. Click **New** to add the path of the kbcli installation package to the user and system variables. + 5. Click **Apply** and **OK**. + + + + + +Install using `curl`: + +1. Install kbcli. + + ```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash + ``` + + To install a specific version: + + 1. Check the available versions in [kbcli Release](https://github.com/apecloud/kbcli/releases/). + 2. Specify a version with `-s` and run the command below. + + ```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s + ``` + +2. Run `kbcli version` to check the version of kbcli and ensure that it is successfully installed. + +:::note + +- If a timeout exception occurs during installation, please check your network settings and retry. + +::: + + + + +## Uninstallation + +To completely remove kbcli and its configuration: + + + + + +For `curl`, run + +```bash +sudo rm /usr/local/bin/kbcli +``` + +For `brew`, run + +```bash +brew uninstall kbcli +``` + +kbcli creates a hidden folder named `~/.kbcli` under the HOME directory to store configuration information and temporary files. You can delete this folder after uninstalling kbcli. + + + + + +1. 
Go to the `kbcli` installation path and delete the installation folder. + + * If you install `kbcli` by script, go to `C:\Program Files` and delete the `kbcli-windows-amd64` folder. + * If you customize the installation path, go to your specified path and delete the installation folder. + +2. Delete the environment variable. + + 1. Click the Windows icon and click **System**. + 2. Go to **Settings** -> **Related Settings** -> **Advanced system settings**. + 3. On the **Advanced** tab, click **Environment Variables**. + 4. Double-click **Path** in **User variables** or **System variables** list. + * If you install `kbcli` by script, double-click **Path** in **User variables**. + * If you customize the installation path, double-click **Path** based on where you created the variable before. + 5. Select `C:\Program Files\kbcli-windows-amd64` or your customized path and delete it. This operation requires double confirmation. + +3. Delete a folder named `.kbcli`. + + kbcli creates a folder named `.kbcli` under the C:\Users\username directory to store configuration information and temporary files. You can delete this folder after uninstalling kbcli. + + + + + +Uninstall kbcli using the `curl` command. + +```bash +sudo rm /usr/local/bin/kbcli +``` + +kbcli creates a hidden folder named `~/.kbcli` under the HOME directory to store configuration information and temporary files. You can delete this folder after uninstalling kbcli. + + + + + + +## Shell Auto-completion + +kbcli provides command completion for: +- bash +- zsh +- fish +- PowerShell + +```bash +# Configure SHELL-TYPE as one type from bash, fish, PowerShell, and zsh +kbcli completion SHELL-TYPE -h +``` + +For example, enable kbcli auto-completion for zsh. + +***Steps:*** + +1. Check the user guide. + + ```bash + kbcli completion zsh -h + ``` + +2. Enable the completion function of your terminal first. + + ```bash + echo "autoload -U compinit; compinit" >> ~/.zshrc + ``` + +3. 
Enable the `kbcli` automatic completion function. + + ```bash + echo "source <(kbcli completion zsh); compdef _kbcli kbcli" >> ~/.zshrc + ``` diff --git a/docs/en/release-1_0_1/user_docs/references/install-minio.mdx b/docs/en/release-1_0_1/user_docs/references/install-minio.mdx new file mode 100644 index 00000000..4be3adf1 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/install-minio.mdx @@ -0,0 +1,52 @@ +--- +title: Install MinIO +description: Complete guide to installing MinIO for KubeBlocks cluster management +keywords: [MinIO, installation, configuration, command-line] +sidebar_position: 10 +sidebar_label: Install MinIO +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Install MinIO + +If you don't have an object storage service from a cloud provider, you can deploy the open-source service MinIO in Kubernetes and use it to configure BackupRepo. + +1. Install MinIO in the `kb-system` namespace. + + ```bash + helm install minio oci://registry-1.docker.io/bitnamicharts/minio --namespace kb-system --create-namespace --set "extraEnvVars[0].name=MINIO_BROWSER_LOGIN_ANIMATION" --set "extraEnvVars[0].value=off" --version 14.10.5 + ``` + + Get the initial username and password: + + ```bash + # Initial username + echo $(kubectl get secret --namespace kb-system minio -o jsonpath="{.data.root-user}" | base64 -d) + + # Initial password + echo $(kubectl get secret --namespace kb-system minio -o jsonpath="{.data.root-password}" | base64 -d) + ``` + +2. Generate credentials. + + Access the login page by running `kubectl port-forward --namespace kb-system svc/minio 9001:9001` and then accessing `127.0.0.1:9001`. + + Once you are logged in to the dashboard, you can generate an `access key` and `secret key`. + + ![backup-and-restore-backup-repo-1](/img/docs/en/backup-and-restore-backup-repo-1.png) + +3. Create a bucket. + + Create a bucket named `test-minio` for the test. 
+ + ![backup-and-restore-backup-repo-2](/img/docs/en/backup-and-restore-backup-repo-2.png) + ![backup-and-restore-backup-repo-3](/img/docs/en/backup-and-restore-backup-repo-3.png) + + :::note + + The access address (endpoint) for the installed MinIO is `http://minio.kb-system.svc.cluster.local:9000`. In this case, `kb-system` is the name of the namespace where MinIO is installed. + + ::: diff --git a/docs/en/release-1_0_1/user_docs/references/install-snapshot-controller.mdx b/docs/en/release-1_0_1/user_docs/references/install-snapshot-controller.mdx new file mode 100644 index 00000000..7f904f61 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/install-snapshot-controller.mdx @@ -0,0 +1,76 @@ +--- +title: Install Snapshot Controller +description: Install Snapshot Controller +keywords: [kbcli, kubeblocks, SnapshotController, K8s, CSI] +sidebar_position: 5 +sidebar_label: Snapshot Controller +--- + +# Install Snapshot Controller + +The Snapshot Controller manages CSI Volume Snapshots, enabling creation, restoration, and deletion of Persistent Volume (PV) snapshots. KubeBlocks' DataProtection Controller leverages this component for database snapshot operations. + +**Step 1: Check Prerequisites** +Verify if required CRDs exist: + +```bash +kubectl get crd volumesnapshotclasses.snapshot.storage.k8s.io +kubectl get crd volumesnapshots.snapshot.storage.k8s.io +kubectl get crd volumesnapshotcontents.snapshot.storage.k8s.io +``` + +If your cluster lacks these CRDs, you'll need to install them first: + +```bash +# v8.2.0 is the latest version of the external-snapshotter, you can replace it with the version you need. 
+kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v8.2.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml +kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v8.2.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml +kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v8.2.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml +``` + + +:::note + +**Optional Installation** + +If snapshot backups aren't required, you can install just the CRDs and skip following steps. + +::: + + +**Step 2: Deploy Snapshot Controller** + +Install using Helm with these steps: + +```bash +helm repo add piraeus-charts https://piraeus.io/helm-charts/ +helm repo update +# Update the namespace to an appropriate value for your environment (e.g. kb-system) +helm install snapshot-controller piraeus-charts/snapshot-controller -n kb-system --create-namespace +``` + +For advanced configuration options, see the [Snapshot Controller documentation](https://artifacthub.io/packages/helm/piraeus-charts/snapshot-controller#configuration). + +**Step 3: Verify Deployment** + +Check if the snapshot-controller Pod is running: + +```bash +kubectl get pods -n kb-system | grep snapshot-controller +``` + +
+ +Expected Output + +```bash +snapshot-controller-xxxx-yyyy 1/1 Running 0 30s +``` + +
+ +If the pod is in a CrashLoopBackOff state, check logs: + +```bash +kubectl logs -n kb-system deployment/snapshot-controller +``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/references/kubeblocks_options.mdx b/docs/en/release-1_0_1/user_docs/references/kubeblocks_options.mdx new file mode 100644 index 00000000..0693fb5e --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/kubeblocks_options.mdx @@ -0,0 +1,217 @@ +--- +title: KubeBlocks Options and Roles +description: KubeBlocks Options and Roles +keywords: [kubeblocks, options, roles] +sidebar_position: 8 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# KubeBlocks Options and Roles + +## KubeBlocks Options + +### KubeBlocks Options +| Parameter | Description | Default | +|----------|------|--------| +| image.registry | KubeBlocks image repository | apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com | +| image.repository | KubeBlocks image repository | apecloud/kubeblocks | +| image.pullPolicy | Image pull policy | IfNotPresent | +| image.tag | Image tag, default follows chart appVersion | "" | +| image.imagePullSecrets | Image pull secrets | [] | +| image.tools.repository | Tools image repository | apecloud/kubeblocks-tools | +| replicaCount | Replica count | 1 | +| reconcileWorkers | Reconcile workers | "" | + +### Data Protection Options +| Parameter | Description | Default | +|----------|------|--------| +| dataProtection.enabled | Enable data protection controllers | true | +| dataProtection.leaderElectId | Data protection leader election ID | "" | +| dataProtection.encryptionKey | Backup encryption key | "" | +| dataProtection.encryptionKeySecretKeyRef.name | Encryption key Secret name | "" | +| dataProtection.encryptionKeySecretKeyRef.key | Encryption key Secret key | "" | +| dataProtection.encryptionKeySecretKeyRef.skipValidation | Skip key validation | false | +| dataProtection.enableBackupEncryption | Enable backup encryption | false 
|
+| dataProtection.backupEncryptionAlgorithm | Backup encryption algorithm, choose one from "AES-128-CFB", "AES-192-CFB", "AES-256-CFB" | "" |
+| dataProtection.gcFrequencySeconds | Garbage collection frequency (seconds) | 3600 |
+| dataProtection.reconcileWorkers | Backup controller concurrency | "" |
+| dataProtection.image.registry | Data protection image registry | "" |
+| dataProtection.image.repository | Data protection image repository | |
+| dataProtection.image.pullPolicy | Image pull policy | IfNotPresent |
+| dataProtection.image.tag | Image tag | "" |
+| dataProtection.image.imagePullSecrets | Image pull secrets | [] |
+| dataProtection.image.datasafed.repository | Datasafed image repository | apecloud/datasafed |
+| dataProtection.image.datasafed.tag | Datasafed image tag | 0.2.0 |
+
+### Backup Repository Options
+| Parameter | Description | Default |
+|----------|------|--------|
+| backupRepo.create | Creates a backup repo during installation | false |
+| backupRepo.default | Set the created repo as the default | true |
+| backupRepo.accessMethod | The access method for the backup repo, options: [Mount, Tool] | Tool |
+| backupRepo.storageProvider | The storage provider used by the repo, options: [s3, oss, minio] | "" |
+| backupRepo.pvReclaimPolicy | The PV reclaim policy, options: [Retain, Delete] | Retain |
+| backupRepo.volumeCapacity | The capacity for creating PVC | "" |
+| backupRepo.config.bucket | Storage bucket | "" |
+| backupRepo.config.endpoint | Storage endpoint | "" |
+| backupRepo.config.region | Storage region | "" |
+| backupRepo.secrets.accessKeyId | Storage secret key ID | "" |
+| backupRepo.secrets.secretAccessKey | Storage secret key | "" |
+
+### Addon Options
+| Parameter | Description | Default |
+|----------|------|--------|
+| addonController.enabled | Enable Addon controller, requires `cluster-admin` ClusterRole | true |
+| addonController.jobTTL | Time-to-live period for addon jobs (time.Duration format) | 5m |
+| 
addonController.jobImagePullPolicy | Image pull policy for addon install jobs | IfNotPresent | +| keepAddons | Keep Addon CR objects when uninstalling chart | true | +| addonChartLocationBase | KubeBlocks official addon chart location base. For air-gapped environments, if URL has prefix "file://", KubeBlocks will use Helm charts copied from addonChartsImage | file:// | +| addonChartsImage.registry | Addon charts image registry (defaults to image.registry if not specified) | "" | +| addonChartsImage.repository | Addon charts image repository | apecloud/kubeblocks-charts | +| addonChartsImage.pullPolicy | Image pull policy | IfNotPresent | +| addonChartsImage.tag | Image tag | "" | +| addonChartsImage.chartsPath | Helm charts path in addon charts image | /charts | +| addonChartsImage.pullSecrets | Image pull secrets | [] | +| addonHelmInstallOptions | Addon helm install options | ["--atomic", "--cleanup-on-fail", "--wait", "--insecure-skip-tls-verify"] | +| upgradeAddons | Upgrade addons when upgrading chart. 
Set to false to prevent addon CRs from being upgraded during chart upgrade | false | +| autoInstalledAddons | List of addons to auto-install during installation and upgrade | ["apecloud-mysql", "etcd", "kafka", "mongodb", "mysql", "postgresql", "qdrant", "redis", "rabbitmq"] | + + +### Controller Options +| Parameter | Description | Default | +|----------|------|--------| +| controllers.apps.enabled | Enable apps controller | true | +| controllers.workloads.enabled | Enable workloads controller | true | +| controllers.operations.enabled | Enable operations controller | true | +| controllers.experimental.enabled | Enable experimental controller | false | +| controllers.trace.enabled | Enable trace controller | false | + +### Feature Gates Options +| Parameter | Description | Default | +|----------|------|--------| +| featureGates.inPlacePodVerticalScaling.enabled | Enable in-place Pod vertical scaling | false | + + +To update the options, you can use the following command: + + + + +1. Installation +```bash +helm install kubeblocks kubeblocks/kubeblocks \ + --namespace kb-system \ + --create-namespace \ + --version {{VERSION}} \ + --set optionName=optionValue +``` + +1. Upgrade +```bash +helm upgrade kubeblocks kubeblocks/kubeblocks \ + --namespace kb-system \ + --version {{VERSION}} \ + --set optionName=optionValue +``` + + + + + +1. Installation +```bash +kbcli kubeblocks install \ + --version={{VERSION}} \ + --create-namespace \ + --set optionName=optionValue +``` + +1. Upgrade +```bash +kbcli kubeblocks upgrade \ + --version={{VERSION}} \ + --set optionName=optionValue +``` + + + + + +## KubeBlocks Operator RBAC Permissions +KubeBlocks operator requires the following permissions to work properly. + +### 1. 
Kubernetes Resource Permissions +**Main permissions include:** + +#### Core Cluster Permissions: +- **Node**: `list`, `watch` +- **Pod**: `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch`, `exec`, `log` +- **Service**: `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch` +- **ConfigMap**: `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch` +- **Secret**: `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch` +- **ServiceAccount**: `create`, `delete`, `get`, `list`, `patch`, `update`, `watch` +- **PersistentVolumeClaim**: `create`, `delete`, `get`, `list`, `patch`, `update`, `watch` +- **PersistentVolume**: `get`, `list`, `patch`, `update`, `watch` +- **Event**: `create`, `get`, `list`, `patch`, `watch` + +#### Application Resource Permissions: +- **Deployment**: `get`, `list`, `watch` +- **StatefulSet**: `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch` +- **Job**: `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch` +- **CronJob**: `create`, `delete`, `get`, `list`, `patch`, `update`, `watch` + +#### Storage Related Permissions: +- **StorageClass**: `create`, `delete`, `get`, `list`, `watch` +- **CSIDriver**: `get`, `list`, `watch` +- **VolumeSnapshot**: `create`, `delete`, `get`, `list`, `patch`, `update`, `watch` +- **VolumeSnapshotClass**: `create`, `delete`, `get`, `list`, `patch`, `update`, `watch` + +#### RBAC Permissions: +- **Role**: `get`, `list`, `watch` +- **RoleBinding**: `create`, `delete`, `get`, `list`, `patch`, `update`, `watch` + +#### Coordination Mechanism Permissions: +- **Lease**: `create`, `get`, `list`, `patch`, `update`, `watch` + +#### Authentication Proxy Permissions + +- **TokenReview**: `create` +- **SubjectAccessReview**: `create` + +### 2. 
KubeBlocks Custom Resource Permissions
+
+- **apps.kubeblocks.io** API Group: **ClusterDefinition**, **Cluster**, **ComponentDefinition**, **Component**, **ComponentVersion**, **Rollout**, **ServiceDescriptor**, **ShardingDefinition**, **SidecarDefinition**
+- **dataprotection.kubeblocks.io** API Group: **ActionSet**, **BackupPolicy**, **BackupPolicyTemplate**, **BackupRepo**, **Backup**, **BackupSchedule**, **Restore**, **StorageProvider**
+- **operations.kubeblocks.io** API Group: **OpsDefinition**, **OpsRequest**
+- **parameters.kubeblocks.io** API Group: **ComponentParameter**, **ParamConfigRenderer**, **Parameter**, **ParameterDefinition**
+- **experimental.kubeblocks.io** API Group: **NodeCountScaler**
+- **extensions.kubeblocks.io** API Group: **Addon**
+- **trace.kubeblocks.io** API Group: **ReconciliationTrace**
+- **workloads.kubeblocks.io** API Group: **InstanceSet**
+
+### 3. Conditional Permissions
+
+**Data Protection Feature (dataProtection.enabled=true):**
+- backup-related permissions
+
+**Webhook Conversion Feature (webhooks.conversionEnabled=true):**
+- **CustomResourceDefinition**: `create`, `get`, `list`, `patch`, `update`, `watch`
+- **Deployment**: Additional deployment management permissions
+
+**Addon Controller (addonControllerEnabled=true):**
+- **cluster-admin**: Full cluster administrator permissions
+
+:::note
+
+Addon Controller requires the `cluster-admin` ClusterRole.
+If you don't want to grant this permission, you can set `addonController.enabled=false` when installing KubeBlocks.
+
+Once disabled, you can still install addons with Helm; see [install addons](../install_addons).
+ +::: + + + + diff --git a/docs/en/release-1_0_1/user_docs/references/kubernetes_and_operator_101.mdx b/docs/en/release-1_0_1/user_docs/references/kubernetes_and_operator_101.mdx new file mode 100644 index 00000000..60d74a61 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/kubernetes_and_operator_101.mdx @@ -0,0 +1,116 @@ +--- +title: Kubernetes and Operator 101 +description: things about K8s you need to know +keywords: [K8s, operator, concept, kubernetes, operator, database] +sidebar_position: 7 +--- + +# Kubernetes and Operator 101 + +## K8s + +What is Kubernetes? Some say it's a container orchestration system, others describe it as a distributed operating system, while some view it as a multi-cloud PaaS (Platform as a Service) platform, and others consider it a platform for building PaaS solutions. + +This article will introduce the key concepts and building blocks within Kubernetes. + +## K8s Control Plane + +The Kubernetes Control Plane is the brain and heart of Kubernetes. It manages the overall operation of the cluster, including processing API requests, storing configuration data, and ensuring the cluster's desired state. Key components include the API Server (which handles communication), etcd (which stores all cluster data), the Controller Manager (which enforces the desired state), the Scheduler (which assigns workloads to Nodes), and the Cloud Controller Manager (which manages cloud-specific integrations, such as load balancers, storage, and networking). Together, these components orchestrate the deployment, scaling, and management of containers across the cluster. + +## Node + +Some describe Kubernetes as a distributed operating system, capable of managing many Nodes. A Node is a physical or virtual machine that acts as a worker within the cluster. Each Node runs essential services, including the container runtime (such as Docker or containerd), the kubelet, and the kube-proxy. 
The kubelet ensures that containers are running as specified in a Pod, the smallest deployable unit in Kubernetes. The kube-proxy handles network routing, maintaining network rules, and enabling communication between Pods and services. Nodes provide the computational resources needed to run containerized applications and are managed by the Kubernetes Master, which distributes tasks, monitors Node health, and maintains the desired state of the cluster. + +:::note + +In certain contexts, the term "Node" can be confusing when discussing Kubernetes (K8s) alongside databases. In Kubernetes, a "Node" refers to a physical or virtual machine that is part of the Kubernetes cluster and serves as a worker to run containerized applications. However, when a database is running within Kubernetes, the term "Database Node" typically refers to a Pod that hosts a database instance. + +In the KubeBlocks documentation, "Node" generally refers to a Database Node. If we are referring to a Kubernetes Node, we will explicitly specify it as a "K8s Node" to avoid any confusion. + +::: + +## kubelet + +The kubelet is the agent that the Kubernetes Control Plane uses to manage each Node in the cluster. It ensures that containers are running in a Pod as defined by the Kubernetes control plane. The kubelet continuously monitors the state of the containers, making sure they are healthy and running as expected. If a container fails, the kubelet attempts to restart it according to the specified policies. + +## Pod + +In Kubernetes, a Pod is somewhat analogous to a virtual machine but is much more lightweight and specialized. It is the smallest deployable unit in Kubernetes. + +It represents one or more containers that are tightly coupled and need to work together, along with shared storage (volumes), network resources, and a specification for how to run the containers. These containers can communicate with each other using localhost and share resources like memory and storage. 
+ +Kubernetes dynamically manages Pods, ensuring they are running as specified and automatically restarting or replacing them if they fail. Pods can be distributed across Nodes for redundancy, making them fundamental to deploying and managing containerized applications (including databases) in Kubernetes. + +## Storage Class + +When creating disks for workloads inside a Pod, such as databases, you may need to specify the type of disk media, whether it's HDD or SSD. In cloud environments, there are often more options available. For example, AWS EBS offers various volume types, such as General Purpose SSD (gp2/gp3), Provisioned IOPS SSD (io1/io2), and Throughput Optimized HDD (st1). In Kubernetes, you can select the desired disk type through a StorageClass. + +## PVC + +A Persistent Volume Claim (PVC) in Kubernetes is a request for storage by a user. A PVC is essentially a way to ask for storage with specific characteristics, such as storage class, size and access modes (e.g., read-write or read-only). PVCs enable Pods to use storage without needing to know the details of the underlying infrastructure. + +In K8s, to use this storage, users create a PVC. When a PVC is created, Kubernetes looks for a StorageClass that matches the request. If a matching StorageClass is found, it automatically provisions the storage according to the defined parameters—whether it's SSD, HDD, EBS or NAS. If a PVC does not specify a StorageClass, Kubernetes will use the default StorageClass (if one is configured) to provision storage. + +## CSI + +In Kubernetes, various StorageClasses are provided through the Container Storage Interface (CSI), which is responsible for provisioning the underlying storage "disks" used by applications. CSI functions similarly to a "disk driver" in Kubernetes, enabling the platform to adapt to and integrate with a wide range of storage systems, such as local disks, AWS EBS, and Ceph. 
These StorageClasses, and the associated storage resources, are provisioned by specific CSI drivers that handle the interaction with the underlying storage infrastructure. + +CSI is a standard API that enables Kubernetes to interact with various storage systems in a consistent and extensible manner. CSI drivers, created by storage vendors or the Kubernetes community, expose essential storage functions like dynamic provisioning, attaching, mounting, and snapshotting to Kubernetes. + +When you define a StorageClass in Kubernetes, it typically specifies a CSI driver as its provisioner. This driver automatically provisions Persistent Volumes (PVs) based on the parameters in the StorageClass and associated Persistent Volume Claims (PVCs), ensuring the appropriate type and configuration of storage—whether SSD, HDD, or otherwise—is provided for your applications. + +## PV + +In Kubernetes, a Persistent Volume (PV) represents a storage resource that can be backed by various systems like local disks, NFS, or cloud-based storage (e.g., AWS EBS, Google Cloud Persistent Disks), typically managed by different CSI drivers. + +A PV has its own lifecycle, independent of the Pod, and is managed by the Kubernetes control plane. It allows data to persist even if the associated Pod is deleted. PVs are bound to Persistent Volume Claims (PVCs), which request specific storage characteristics like size and access modes, ensuring that applications receive the storage they require. + +In summary, PV is the actual storage resource, while PVC is a request for storage. Through the StorageClass in the PVC, it can be bound to a PV provisioned by different CSI drivers. + +## Service + +In Kubernetes, a Service acts as a load balancer. It defines a logical set of Pods and provides a policy for accessing them. Since Pods are ephemeral and can be dynamically created and destroyed, their IP addresses are not stable. 
A Service resolves this issue by providing a stable network endpoint (a virtual IP address, known as a ClusterIP) that remains constant, allowing other Pods or external clients to communicate with the set of Pods behind the Service without needing to know their specific IP addresses. + +Service supports different types: ClusterIP (internal cluster access), NodePort (external access via `:`), LoadBalancer (exposes the Service externally using a cloud provider's load balancer), and ExternalName (maps the Service to an external DNS). + +## ConfigMap + +A ConfigMap is used to store configuration data in key-value pairs, allowing you to decouple configuration from application code. This way, you can manage application settings separately and reuse them across multiple environments. ConfigMaps can be used to inject configuration data into Pods as environment variables, command-line arguments, or configuration files. They provide a flexible and convenient way to manage application configurations without hardcoding values directly into your application container. + +## Secret + +A Secret is used to store sensitive data such as passwords, tokens, or encryption keys. Secrets allow you to manage confidential information separately from your application code and avoid exposing sensitive data in your container images. Kubernetes Secrets can be injected into Pods as environment variables or mounted as files, ensuring that sensitive information is handled in a secure and controlled manner. + +However, Secrets are not encrypted by default—they are simply base64-encoded, which does not provide real encryption. They should still be used with care, ensuring proper access controls are in place. + +## CRD + +If you want to manage database objects using Kubernetes, you need to extend the Kubernetes API to describe the database objects you're managing. 
This is where the CRD (Custom Resource Definition) mechanism comes in, allowing you to define custom resources specific to your use case, such as database clusters or backups, and manage them just like native Kubernetes resources.
+
+## CR
+
+A Custom Resource (CR) is an instance of a Custom Resource Definition (CRD). It represents a specific configuration or object that extends the Kubernetes API. CRs allow you to define and manage custom resources, such as databases or applications, using Kubernetes' native tools. Once a CR is created, Kubernetes controllers or Operators monitor it and perform actions to maintain the desired state.
+
+CRD and CR are the foundation for developing a Kubernetes Operator. CRDs are often used to implement custom controllers or operators that continuously watch for changes to CRs (representing, for example, database clusters) and automatically perform actions.
+
+## What is Kubernetes Operator?
+
+A Kubernetes Operator is software, typically composed of one or more controllers, that automates the management of complex applications by translating changes made to a Custom Resource (CR) into actions on native Kubernetes objects, such as Pods, Services, PVCs, ConfigMaps, and Secrets.
+
+- Input: User modifications to the CR.
+- Output: Corresponding changes to underlying Kubernetes resources or interactions with external systems (e.g., writing to a database or calling APIs), depending on the requirements of the managed application.
+
+The Operator continuously watches the state of these Kubernetes objects. When changes occur (e.g., a Pod crashes), the Operator automatically takes corrective actions, like recreating the Pod or adjusting traffic (e.g., updating Service Endpoints).
+ +In essence, a Kubernetes Operator encapsulates complex operational knowledge into software, automating tasks like deployment, scaling, upgrades, and backups, ensuring the application consistently maintains its desired state without manual intervention. + +## Helm and Helm Chart + +Helm is a popular package manager for Kubernetes that helps manage and deploy applications. It packages all the necessary Kubernetes resources into a single Helm Chart, allowing you to install applications with a single command (helm install). Helm also handles configuration management and updates (helm upgrade), making the entire lifecycle of the application much easier to manage. +Key components of a Helm Chart: + +- Templates: YAML files with placeholders that define Kubernetes resources (like Pods, Services, and ConfigMaps). +- Values.yaml: A file where users specify default values for the templates, allowing easy customization. Helm allows you to take an existing chart and override the default values using values.yaml or command-line flags, enabling you to provide environment-specific configurations without modifying the underlying templates. +- Chart.yaml: Metadata about the chart, including the name, version, and description. + +Helm integrates well with CI/CD tools like Jenkins, GitLab CI, and GitHub Actions. It can be used to automate deployments and rollbacks as part of a continuous delivery pipeline, ensuring that applications are consistently deployed across different environments. 
diff --git a/docs/en/release-1_0_1/user_docs/references/prepare-a-local-k8s-cluster.mdx b/docs/en/release-1_0_1/user_docs/references/prepare-a-local-k8s-cluster.mdx new file mode 100644 index 00000000..5a5b6d70 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/prepare-a-local-k8s-cluster.mdx @@ -0,0 +1,257 @@ +--- +title: Create a test Kubernetes cluster +description: Create a test Kubernetes cluster +keywords: [kbcli, kubeblocks, addons, installation] +sidebar_position: 5 +sidebar_label: Create a test Kubernetes cluster +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Create a test Kubernetes cluster + +This tutorial introduces how to create a local Kubernetes test cluster using Minikube, K3d, and Kind. These tools make it easy to try out KubeBlocks on your local host, offering a great solution for development, testing, and experimentation without the complexity of creating a full production-grade cluster. + +## Before you start + +Make sure you have the following tools installed on your local host: + +- Docker: All three tools rely on Docker to create containerized Kubernetes clusters. +- kubectl: The Kubernetes command-line tool for interacting with clusters. Refer to the [kubectl installation guide](https://kubernetes.io/docs/tasks/tools/) + + + + + +## Create a Kubernetes cluster using Kind + +Kind stands for Kubernetes IN Docker. It runs Kubernetes clusters within Docker containers, making it an ideal tool for local Kubernetes testing. + +1. Install Kind. For details, you can refer to [Kind Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start/). 
+ + + + + + ```bash + brew install kind + ``` + + + + + + ```bash + # For AMD64 / x86_64 + [ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.24.0/kind-linux-amd64 + # For ARM64 + [ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.24.0/kind-linux-arm64 + chmod +x ./kind + sudo cp ./kind /usr/local/bin/kind + rm -rf kind + ``` + + + + + + You can use chocolatey to install Kind. + + ```bash + choco install kind + ``` + + + + + +2. Create a Kind cluster. + + ```bash + kind create cluster --name mykindcluster + ``` + + This command creates a single-node Kubernetes cluster running in a Docker container. + +3. Check whether the cluster is started and running. + + ```bash + kubectl get nodes + > + NAME STATUS ROLES AGE VERSION + mykindcluster-control-plane Ready control-plane 25s v1.31.0 + ``` + + You can see a node named `mykindcluster-control-plane` from the output, which means the cluster is created successfully. + +4. (Optional) Configure a cluster with multiple nodes. + + Kind also supports clusters with multiple nodes. You can create a multi-node cluster by a configuration file. + + ```yaml + kind: Cluster + apiVersion: kind.x-k8s.io/v1alpha4 + nodes: + role: control-plane + role: worker + role: worker + ``` + + Use the configuration file to create a multi-node cluster. + + ```bash + kind create cluster --name multinode-cluster --config kind-config.yaml + ``` + +5. If you want to delete the Kind cluster, run the command below. + + ```bash + kind delete cluster --name mykindcluster + ``` + + + + + +## Create a Kubernetes cluster using Minikube + +Minikube runs a single-node Kubernetes cluster on your local machine, either in a virtual machine or a container. + +1. Install Minikube. For details, you can refer to [Minikube Quick Start](https://minikube.sigs.k8s.io/docs/start/). 
+ + + + + + ```bash + brew install minikube + ``` + + + + + + ```bash + curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-latest.x86_64.rpm + sudo rpm -Uvh minikube-latest.x86_64.rpm + ``` + + + + + + You can use chocolatey to install Minikube. + + ```bash + choco install minikube + ``` + + + + + +2. Start Minikube. This command will create a local Kubernetes cluster. + + ```bash + minikube start + ``` + + You can also specify other drivers (such as Docker, Hyperkit, KVM) to start it. + + ```bash + minikube start --driver=docker + ``` + +3. Verify whether Minikube and the K8s cluster is running normally. + + Check whether Minikube is running. + + ```bash + minikube status + > + minikube + type: Control Plane + host: Running + kubelet: Running + apiserver: Running + kubeconfig: Configured + ``` + + Check whether the K8s cluster is running. + + ```bash + kubectl get nodes + > + NAME STATUS ROLES AGE VERSION + minikube Ready control-plane 1d v1.26.3 + ``` + + From the output, we can see that the Minikube node is ready. + + + + + +## Create a Kubernetes cluster using k3d + +k3d is a lightweight tool that runs k3s (a lightweight Kubernetes distribution) in Docker containers. + +1. Install k3d. For details, refer to [k3d Quick Start](https://k3d.io/v5.7.4/#releases). + + + + + + ```bash + brew install k3d + ``` + + + + + + ```bash + curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash + ``` + + + + + + You can use chocolatey to install k3d. + + ```bash + choco install k3d + ``` + + + + + +2. Create a k3s cluster. + + ```bash + k3d cluster create myk3s + ``` + + This command will create a Kubernetes cluster named as `myk3s` with a single node. + +3. Verify whether this cluster is running normally. + + ```bash + kubectl get nodes + > + NAME STATUS ROLES AGE VERSION + k3d-myk3s-server-0 Ready control-plane,master 31s v1.30.4+k3s1 + ``` + +4. If you want to delete the k3s cluster, run the command below. 
+ + ```bash + k3d cluster delete myk3s + ``` + + + + diff --git a/docs/en/release-1_0_1/user_docs/references/terminology.mdx b/docs/en/release-1_0_1/user_docs/references/terminology.mdx new file mode 100644 index 00000000..aa4ea4d3 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/references/terminology.mdx @@ -0,0 +1,98 @@ +--- +title: Terminology +description: Terms you should know of KubeBlocks +keywords: [terminology] +sidebar_position: 2 +sidebar_label: Terminology +--- + +# Terminology + +### Addon + +An addon is an efficient and open extension mechanism. With the KubeBlocks addon, developers can quickly add a new database engine to KubeBlocks and obtain specific foundational management functionalities of that database engine, including but not limited to lifecycle management, data backup and recovery, metrics and log collection, etc. +### ActionSet + +An ActionSet declares a set of commands to perform backup and restore operations using specific tools, such as commands to backup MySQL using xtrabackup, as well as commands to restore data from the backup. + +### BackupPolicy + +A BackupPolicy represents a backup strategy for a Cluster, including details such as the backup repository (BackupRepo), backup targets, and backup methods. Multiple backup methods can be defined within a backup policy, with each method referring to a corresponding ActionSet. When creating a backup, the backup policy and backup method can be specified for the backup process. + +### BackupRepo + +BackupRepo is the storage repository for backup data. Its principle involves using a CSI driver to upload backup data to various storage systems, such as object storage systems like S3, GCS, as well as storage servers like FTP, NFS, and others. + +### BackupSchedule + +BackupSchedule declares the configuration for automatic backups in a Cluster, including backup frequency, retention period, backup policy, and backup method. 
The BackupSchedule Controller creates a CronJob to automatically backup the Cluster based on the configuration specified in the Custom Resource (CR). + +### Cluster + +A Cluster is composed of [Components](#component). + +### Component + +A component is the fundamental assembly component used to build a data storage and processing system. A Component utilizes a StatefulSet (either native to Kubernetes or specified by the customer, such as OpenKruise) to manage one to multiple Pods. + +### ComponentRef + +ComponentRef is used to select the component and its fields to be referenced. + +### ConfigConstraint + +KubeBlocks abstracts engine configuration files into ConfigConstraints to better support configuration changes. The abstracted information within ConfigConstraints includes the following content: + - the format of the configuration file; + - the dynamic and static parameters and the immutable parameters; + - the dynamically changing parameters; + - the parameter parity rules. + +### CRD (Custom Resource Definition) + +CRD (Custom Resource Definition) extends the Kubernetes API, empowering developers to introduce new data types and objects known as custom resources. + +### Operator + +An Operator, a type of custom controller, automates tasks typically performed by human operators when managing one or more applications or services. By ensuring that a resource's defined state consistently aligns with its observed state, an operator supports Kubernetes in its management responsibilities. + +### OpsDefinition + +Ops is short for "Operations," representing database maintenance operations. It defines the operations tasks related to database management, specifying which operations are supported by the cluster and components. 
+ +### OpsRequest + +An OpsRequest represents a single operation request. + +### RBAC (Role-Based Access Control) + +RBAC (Role-Based Access Control), also known as role-based security, is a methodology employed in computer systems security to limit access to a system's network and resources exclusively to authorized users. Kubernetes features a built-in API for managing roles within namespaces and clusters, enabling their association with specific resources and individuals. + +### ServiceDescriptor + +The ServiceDescriptor is a Custom Resource (CR) object used to describe API objects that reference storage services. It allows users to abstract a service provided either by Kubernetes or non-Kubernetes environments, making it available for referencing by other Cluster objects within KubeBlocks. The "ServiceDescriptor" can be used to address issues such as service dependencies, component dependencies, and component sharing within KubeBlocks. + +The management of containerized distributed database by KubeBlocks is mapped to objects at four levels: Cluster, Component, InstanceSet, and Instance, forming a layered architecture: + +### Cluster layer + +A Cluster object represents a complete distributed database cluster. Cluster is the top-level abstraction, including all components and services of the database. + +### Component layer + +A Component represents logical components that make up the Cluster object, such as metadata management, data storage, query engine, etc. Each Component object has its specific task and functions. A Cluster object contains one or more Component objects. + +### InstanceSet layer + + An InstanceSet object manages the workload required for multiple replicas inside a Component object, perceiving the roles of the replicas. A Component object contains an InstanceSet object. + +### Instance layer + +An Instance object represents an actual running instance within an InstanceSet object, corresponding to a Pod in Kubernetes. 
An InstanceSet object can manage zero to multiple Instance objects. + +### ComponentDefinition + + ComponentDefinition is an API used to define components of a distributed database, describing the implementation details and behavior of the components. With ComponentDefinition, you can define key information about components such as container images, configuration templates, startup scripts, storage volumes, etc. They can also set the behavior and logic of components for different events (e.g., node joining, node leaving, addition of components, removal of components, role switching, etc.). Each component can have its own independent ComponentDefinition or share the same ComponentDefinition. + +### ClusterDefinition + + ClusterDefinition is an API used to define the overall structure and topology of a distributed database cluster. Within ClusterDefinition, you can reference ComponentDefinitions of its included components, and define dependencies and references between components. diff --git a/docs/en/release-1_0_1/user_docs/release_notes/_category_.yml b/docs/en/release-1_0_1/user_docs/release_notes/_category_.yml new file mode 100644 index 00000000..ecb6688e --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/release_notes/_category_.yml @@ -0,0 +1,4 @@ +position: 101 +label: Release Notes +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-09/090.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-09/090.mdx new file mode 100644 index 00000000..2487c67e --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-09/090.mdx @@ -0,0 +1,117 @@ +--- +title: v0.9.0 +description: Release Notes v0.9.0 +keywords: [kubeblocks,release notes] +sidebar_position: 10 +--- + +# KubeBlocks 0.9.0 (2024-07-09) + +We are thrilled to announce the release of KubeBlocks v0.9.0, which brings us one step closer to the highly anticipated v1.0 release. 
This version introduces several significant improvements and +new features that enhance the overall functionality and user experience of the KubeBlocks platform. + +## API Highlights + +- In KubeBlocks v0.9, with the introduction of topology support in KubeBlocks, the cluster building experience has become much more flexible and intuitive, akin to assembling a cluster using building blocks. The ClusterDefinition API has added the topologies field, allowing developers to provide various deployment patterns with different topologies. Database users can choose a topology when creating a Cluster through the topology field. For instance, the Redis Addon offers three topologies: Standalone, Replication, and Proxy. The Standalone topology only includes one Component - RedisServer, the Replication topology includes both RedisServer and Sentinel Components, and the Proxy topology adds a third Component, such as Twemproxy. +- KubeBlocks now supports managing horizontal scaling (Reshard) of distributed databases. You can represent a horizontal shard with a Component, and scale up or down this horizontal shard by adding or removing Components. This scaling capability will also be used in the distributed deployment of Redis and Pika. +- KubeBlocks now uses InstanceSet instead of StatefulSet to manage Pods. InstanceSet supports taking a specified Pod offline and Pod in-place update, and also the primary and secondary databases can adopt different pod specs in a database Replication architecture (StatefulSet doesn't support these features). +- Developers can add more custom event handlers for Components! The ComponentDefinition API, introduced in v0.8, includes the lifeCycleActions field, allowing you to define various custom event handlers. Building on this, KubeBlocks v0.9 provides more handlers for custom addon implementation, including roleprobe (node role probing), memberLeave (node offline), preTerminate (Component offline), and postProvision (Component online). 
The expansion of event handlers enhances KubeBlocks' expression capabilities. For example, preTerminate and postProvision can be used to execute cross-shard data redistribution (Rebalance) in distributed databases or initiate registration to third-party HA managers like Sentinel and Orchestrator. + +## Addon Highlights + +- KubeBlocks supports Redis Cluster mode (sharding mode) [#5833](https://github.com/apecloud/kubeblocks/issues/5833) + Redis Cluster is designed to provide horizontal write scalability and intelligent client high-availability strategies, in addition to its excellent failover capability. Redis Cluster distributes data across multiple Redis nodes, significantly enhancing system capacity, performance, and availability. +- KubeBlocks introduces MySQL Replication mode [#1330](https://github.com/apecloud/kubeblocks/issues/1330) + Compared to MGR clusters, the MySQL Replication topology requires fewer resources (only two database replicas) and incurs less overhead for data replication. When there are no extreme demands for service availability and data reliability, the Replication topology is a more cost-effective choice. You can actively switch MySQL replica roles using kbcli or trigger a passive failover by deleting specified Kubernetes pods via kubectl. If there are no long transactions or large table DDLs, the failover generally can be completed within 30 seconds. + +## What's Changed + +### New Features + +**KubeBlocks** +- ClusterDefinition API + - Supports topology API, allowing developers to customize various topologies. [#6582](https://github.com/apecloud/kubeblocks/pull/6582) +- Cluster API + - Supports ShardingSpec API. [#6437](https://github.com/apecloud/kubeblocks/pull/6437) + - Supports sharding scaling. [#6774](https://github.com/apecloud/kubeblocks/pull/6774) +- ComponentDefinition API + - lifecycleActions API supports user-defined operation actions, including roleprobe, memberLeave, preTerminate, postProvision. 
[#6037](https://github.com/apecloud/kubeblocks/pull/6037) [#6582](https://github.com/apecloud/kubeblocks/pull/6582) [#6720](https://github.com/apecloud/kubeblocks/pull/6720) [#6774](https://github.com/apecloud/kubeblocks/pull/6774) + - New Vars API for referencing instance-related dynamic resources and information, including secret, service, and service reference. + - Supports Vars API. [#5919](https://github.com/apecloud/kubeblocks/pull/5919) + - Supports cross-Component Vars referencing. [#7155](https://github.com/apecloud/kubeblocks/pull/7155) + - Optimizes ServiceRef referencing. [#7006](https://github.com/apecloud/kubeblocks/pull/7006) + - Supports dynamic configuration, regenerating specified variables after vertical scaling or horizontal scaling is performed. [#6273](https://github.com/apecloud/kubeblocks/issues/6273) [#6690](https://github.com/apecloud/kubeblocks/issues/6690) +- Component + - Supports deleting Component. [#6774](https://github.com/apecloud/kubeblocks/pull/6774) + - Supports ComponentVersion. [#6582](https://github.com/apecloud/kubeblocks/pull/6582) +- InstanceSet API + - InstanceSet replaces StatefulSet to manage Pods. [#7084](https://github.com/apecloud/kubeblocks/pull/7084) + - Supports Instance Template. [#5799](https://github.com/apecloud/kubeblocks/issues/5799) + - Supports specified instance scaling down. [#6958](https://github.com/apecloud/kubeblocks/pull/6958) + - Supports In-place Update. [#7000](https://github.com/apecloud/kubeblocks/pull/7000) +- OpsRequest API + - Supports rebuilding faulty backup instances. [#6872](https://github.com/apecloud/kubeblocks/pull/6872) + - Supports the force flag to control concurrency. [#6828](https://github.com/apecloud/kubeblocks/pull/6828) + - Supports custom multi-task sequential execution. [#6735](https://github.com/apecloud/kubeblocks/pull/6735) +- Supports NodeCountScaler. [#7258](https://github.com/apecloud/kubeblocks/pull/7258) +- Supports PITR. 
[#6779](https://github.com/apecloud/kubeblocks/pull/6779) +- Supports cross-Namespace restore. [#6778](https://github.com/apecloud/kubeblocks/pull/6778) + +**kbcli** +- Supports PostgreSQL point-in-time recovery. [#329](https://github.com/apecloud/kbcli/pull/329) +- cluster supports the rebuild-instance subcommand to rebuild instances. [#295](https://github.com/apecloud/kbcli/pull/295) +- cluster create subcommand supports elasticsearch. [#389](https://github.com/apecloud/kbcli/pull/389) +- Supports specifying path prefix when creating a backup repository. [#294](https://github.com/apecloud/kbcli/pull/294) + +**Addons** +*Redis* +- Supports the official Redis Cluster topology. [#301](https://github.com/apecloud/kubeblocks-addons/pull/301) +- Enhances the functionality and stability of Redis. + - Adapts to new APIs such as ComponentDefinition and ComponentVersion, supporting multiple topology forms. [#501](https://github.com/apecloud/kubeblocks-addons/pull/501) + - Optimizes Redis Replication Cluster initialization logic, removing dependency on DownwardAPI. [#462](https://github.com/apecloud/kubeblocks-addons/pull/462) [#616](https://github.com/apecloud/kubeblocks-addons/pull/616) + - Supports Redis v7.2.4. [#571](https://github.com/apecloud/kubeblocks-addons/pull/571) +- Redis sharding cluster supports backup and restore. [#442](https://github.com/apecloud/kubeblocks-addons/pull/442) +*MySQL* +- Adds the open-source component Orchestrator Addon to manage MySQL. [#625](https://github.com/apecloud/kubeblocks-addons/pull/625) [#567](https://github.com/apecloud/kubeblocks-addons/pull/567) +*PostgreSQL* +- Supports PostgreSQL PITR. [#361](https://github.com/apecloud/kubeblocks-addons/pull/361) +- Supports PostgreSQL v15.7. [#361](https://github.com/apecloud/kubeblocks-addons/pull/361) +*Qdrant* +- Qdrant sharding cluster supports backup and restore. 
[#442](https://github.com/apecloud/kubeblocks-addons/pull/442) +*MogDB* +- Supports the creation, scaling, backup and switchover of MogDB Replication Cluster in v5.0.5. [#343](https://github.com/apecloud/kubeblocks-addons/pull/343) [#350](https://github.com/apecloud/kubeblocks-addons/pull/350) +*ElasticSearch* +- Supports Elasticsearch v7.7.1, v7.10.1, and v8.8.2. [#767](https://github.com/apecloud/kubeblocks-addons/pull/767) +*Pulsar* +- Supports v3.0.2. [#340](https://github.com/apecloud/kubeblocks-addons/pull/340) +- Supports NodePort. [#358](https://github.com/apecloud/kubeblocks-addons/pull/358) +*VictoriaMetrics* +- Supports VictoriaMetrics v1.100.1. [#479](https://github.com/apecloud/kubeblocks-addons/pull/479) + +## API deprecations and other changes for Release 0.9 + +- ConfigConstraint API becomes stable and upgrades from v1alpha1 to v1beta1. +- The group of StorageProvider changes and is migrated from storage.kubeblocks.io to dataprotection.kubeblocks.io. +- ClusterVersion v1alpha1 CRD will be removed in Release 1.0. +- ComponentClassDefinition v1alpha1 CRD will be removed in Release 1.0. +- ComponentResourceConstraint v1alpha1 CRD will be removed in Release 1.0. +- ClusterDefinition API + - type, componentDefs, connectionCredential will be removed in Release 1.0. +- Cluster API + - Scheduling: tenancy and availabilityPolicy will be removed in Release 1.0. + - API simplification: replicas, resources, storage, and network will be removed in Release 1.0. +- ComponentDefinition API + - switchPolicy will be removed in Release 1.0. The same capability can be achieved using the componentDefinition.spec.lifecycleActions.switchover API. +- ServiceRef API + - Cluster will be removed in Release 1.0. The same capability can be achieved using serviceRef.clusterServiceSelector. + +Additionally, all fields referencing the above APIs are also marked as deprecated and will be removed in Release 1.0. 
+- clusterVersionRef +- componentDefRef +- classDefRef + +KubeBlocks Release 0.9 still maintains compatibility with APIs marked as deprecated. + +## Deprecated Features + +### kbcli deprecated features + +- bench, fault, and migration subcommands are removed to streamline functions. + + +## Upgrade to v0.9 +Refer to [Upgrade to KubeBlocks v0.9](../upgrade/upgrade-to-0_9_0). + diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-09/091.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-09/091.mdx new file mode 100644 index 00000000..59413741 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-09/091.mdx @@ -0,0 +1,98 @@ +--- +title: v0.9.1 +description: Release Notes v0.9.1 +keywords: [kubeblocks,release notes] +sidebar_position: 9 +--- + +# KubeBlocks 0.9.1 (2024-10-18) + +We're thrilled to announce that KubeBlocks v0.9.1 is now released! + +In this release, KubeBlocks further optimizes its APIs and Addons to bring you new features and better user experience. This release includes new features like stopping/starting a cluster by Cluster API, instance rebuild capability in OpsRequest, PITR and key-based recovery for Redis. We also fixed some bugs and introduced several improvements to enhance the overall functionality. + +Read the full release note and [upgrade to KubeBlocks v0.9.1](../upgrade/upgrade-to-v09-version) to explore more features! + +## Highlights + +### KubeBlocks + +- Supports cluster stop & start operations via Cluster API + + This feature provides a new option to meet different needs in various scenarios. + +- Enhanced instance rebuild capability in OpsRequest + + Combined with KubeBlocks’ [InstanceSet](https://kubeblocks.io/blog/instanceset-introduction), this feature greatly improves the system’s recovery capability in failure scenarios. + +### Addons + +- Redis + + Supports PITR (Point-in-Time Recovery) and key-based recovery. + +- ZooKeeper + + Supports backup. 
+ +- New versions + + MySQL and PostgreSQL Addons support more versions. For the latest versions of Addons, refer to the [Addon List](https://github.com/apecloud/kubeblocks-addons?tab=readme-ov-file#supported-add-ons). + +## What's Changed + +### New Features + +#### KubeBlocks + +- OpsDefinition and BackupPolicyTemplate support cmpdName prefix and regex matching [#8174](https://github.com/apecloud/kubeblocks/pull/8174) + + OpsDefinition and BackupPolicyTemplate now support component name prefixes and regular expression matching, offering greater flexibility. + +- High Availability (HA) records [#8089](https://github.com/apecloud/kubeblocks/pull/8089) + + KubeBlocks supports HA records, enhancing fault tolerance and system reliability. + +- Supports cluster start and stop operations via Cluster API [#7783](https://github.com/apecloud/kubeblocks/pull/7783) + + KubeBlocks supports starting and stopping clusters via Cluster API, simplifying cluster management. + +- Supports horizontal scaling on instance rebuilds [#7710](https://github.com/apecloud/kubeblocks/pull/7710) + + You can choose how to rebuild certain instance, in-place-rebuild or remote-rebuild, via inPlace field in OpsRequest API. + +- Automatic cleanup of failed OpsRequests [#7796](https://github.com/apecloud/kubeblocks/pull/7796) + + KubeBlocks adds a mechanism to automatically clean up failed OpsRequests, improving resource management. + +- Log Collection on Backup Failure [#8208](https://github.com/apecloud/kubeblocks/pull/8208) + + KubeBlocks supports collecting logs when backup operations fail. 
+ +#### Addons + +- Redis Addon + - PITR for Standalone Replicas [#7998](https://github.com/apecloud/kubeblocks/pull/7998) + - Key-based recovery for Redis [#8129](https://github.com/apecloud/kubeblocks/pull/8129) +- Supports Loki [#707](https://github.com/apecloud/kubeblocks-addons/pull/707) +- Supports MinIO [#926](https://github.com/apecloud/kubeblocks-addons/pull/926) +- Supports RabbitMQ [#746](https://github.com/apecloud/kubeblocks-addons/pull/746) +- Supports MySQL 8.4 [#987](https://github.com/apecloud/kubeblocks-addons/pull/987) +- Supports PostgreSQL 16 [#973](https://github.com/apecloud/kubeblocks-addons/pull/973) +- ZooKeeper Addon + + - Supports backup. [#794](https://github.com/apecloud/kubeblocks-addons/pull/794), [#851](https://github.com/apecloud/kubeblocks-addons/pull/851) + +### Others + +- ComponentDefinition immutability check + + KubeBlocks adds a default immutability check for ComponentDefinition to avoid accidental changes, enhancing system stability. + +- Remove the Application Addon (#7866) + + The Application Addon has been removed from the codebase and is no longer installed by default. However, you can still install it manually if needed. + +## Upgrade to v0.9.1 + +Refer to [Upgrade to KubeBlocks v0.9.x](../upgrade/upgrade-to-v09-version). \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-09/092.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-09/092.mdx new file mode 100644 index 00000000..d9930786 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-09/092.mdx @@ -0,0 +1,55 @@ +--- +title: v0.9.2 +description: Release Notes v0.9.2 +keywords: [kubeblocks,release notes] +sidebar_position: 8 +--- + +# KubeBlocks 0.9.2 (2024-12-03) + +We are delighted to announce the release of KubeBlocks v0.9.2. This release includes several new features, bug fixes, and various improvements. 
+ +Read the full release note and [upgrade to KubeBlocks v0.9.2](../upgrade/upgrade-to-v09-version) to explore more features! The upgrade process for v0.9.2 is identical to that of v0.9.1. Simply follow the v0.9.1 tutorial, updating the version number as needed to complete the upgrade to v0.9.2. + +## KubeBlocks + +- Added support for rolling updates of container images, enabling seamless updates with minimal downtime. ([#8389](https://github.com/apecloud/kubeblocks/pull/8389)) +- Introduced component-level stop/start capabilities, allowing fine-grained control over cluster components. ([#8480](https://github.com/apecloud/kubeblocks/pull/8480)) +- Enhanced Host Network support for shardings. ([#8517](https://github.com/apecloud/kubeblocks/pull/8517), [#8502](https://github.com/apecloud/kubeblocks/pull/8502)) +- Improved horizontal scaling OpsRequest for shardings. ([#8530](https://github.com/apecloud/kubeblocks/pull/8530)) +- Added support for recreate pod update policies to enhance update strategies. ([#8466](https://github.com/apecloud/kubeblocks/pull/8466)) +- KubeBlocks Installation improvements: Support for defining extra annotations and environment variables. ([#8454](https://github.com/apecloud/kubeblocks/pull/8454)) + +## Addons + +### MySQL + +- Added Jemalloc support for improved memory management. ([#1158](https://github.com/apecloud/kubeblocks-addons/pull/1158)) + +### Redis + +- Added NodePort announce mode support for Redis Sentinel. ([#1227](https://github.com/apecloud/kubeblocks-addons/pull/1227)) +- Introduced support for fixed pod IPs, custom master names, and full FQDN domains. ([#1222](https://github.com/apecloud/kubeblocks-addons/pull/1222)) +- Updated user ACL backup frequency for PITR backups. ([#1180](https://github.com/apecloud/kubeblocks-addons/pull/1180)) + +### RabbitMQ + +- Added support for member leave operations to enable scale-in scenarios. 
([#1229](https://github.com/apecloud/kubeblocks-addons/pull/1229)) +- Enhanced RabbitMQ configuration with config constraints and file-based logs. ([#1199](https://github.com/apecloud/kubeblocks-addons/pull/1199)) + +### MongoDB + +- Added support for host networking. ([#1152](https://github.com/apecloud/kubeblocks-addons/pull/1152)) + +### PostgreSQL + +- Enhanced vanilla PostgreSQL integration and added support for PostgreSQL 15. ([#1092](https://github.com/apecloud/kubeblocks-addons/pull/1092)) +- Added support for Supabase PostgreSQL. ([#1154](https://github.com/apecloud/kubeblocks-addons/pull/1154)) + +### Xinference + +- Added support for Xinference v0.15.4. ([#1248](https://github.com/apecloud/kubeblocks-addons/pull/1248)) + +You can view the [full changelog here](https://github.com/apecloud/kubeblocks/compare/v0.9.1...v0.9.2). + +## Upgrade to v0.9.2 + +Refer to [Upgrade to KubeBlocks v0.9.x](../upgrade/upgrade-to-v09-version). diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-09/093.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-09/093.mdx new file mode 100644 index 00000000..f4aea4aa --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-09/093.mdx @@ -0,0 +1,82 @@ +--- +title: v0.9.3 +description: Release Notes v0.9.3 +keywords: [kubeblocks,release notes] +sidebar_position: 7 +--- + +# KubeBlocks 0.9.3 (2025-02-19) + +## KubeBlocks + +### Features + +- **Member Join Action**: LifeCycle Actions supported `memberjoin` action. +- **Incremental Backup**: DataProtection supported incremental backups ([#8757](https://github.com/apecloud/kubeblocks/pull/8757)). +- **Sharding Cluster Improvement** + - Supported rebuilding an instance for sharding clusters from backup ([#8777](https://github.com/apecloud/kubeblocks/pull/8777)). + - Supported sharding components switchover in OpsRequest ([#8786](https://github.com/apecloud/kubeblocks/pull/8786)). 
+ +## KubeBlocks Addons + +### MySQL + +- **Improvements:** + - Supported configuring `lower_case_table_names` for MySQL ([#1335](https://github.com/apecloud/kubeblocks-addons/pull/1335)). + - Updated MySQL configuration to support `default_time_zone` updates ([#1377](https://github.com/apecloud/kubeblocks-addons/pull/1377)). +- **Fixes:** + - Fixed an issue where the MySQL backup policy wasn't applied. ([#1310](https://github.com/apecloud/kubeblocks-addons/pull/1310)). + +### PostgreSQL + +- **Fixes:** + - Fixed PITR (Point-in-Time Recovery) failures due to missing WAL log backups ([#1280](https://github.com/apecloud/kubeblocks-addons/pull/1280)). + - Resolved PITR failures ([#1290](https://github.com/apecloud/kubeblocks-addons/pull/1290)). + - Improved WAL log archiving. + +### Redis + +- **Improvements:** + - Updated Redis to address [CVE-2024-46981](https://access.redhat.com/security/cve/cve-2024-46981) ([#1405](https://github.com/apecloud/kubeblocks-addons/pull/1405)). + - Supported custom secret password in Redis ([#1406](https://github.com/apecloud/kubeblocks-addons/pull/1406)). + - Supported `storageClassName` values for Redis Sentinel ([#1418](https://github.com/apecloud/kubeblocks-addons/pull/1418)). +- **Fixes:** + - Fixed FQDN issues when scaling out shards in Redis Cluster ([#1283](https://github.com/apecloud/kubeblocks-addons/pull/1283)). + +### MongoDB + +- **Improvements:** + - Added new MongoDB versions: v5.0.30/6.0.20/7.0.16/8.0.4 ([#1431](https://github.com/apecloud/kubeblocks-addons/pull/1431)). + - Updated MongoDB cluster creation examples ([#1363](https://github.com/apecloud/kubeblocks-addons/pull/1363)). + +### ClickHouse + +- **Improvements:** + - Added ClickHouse reconfiguration examples ([#1401](https://github.com/apecloud/kubeblocks-addons/pull/1401)). + - Fixed ClickHouse sharding bootstrap issues ([#1402](https://github.com/apecloud/kubeblocks-addons/pull/1402)). 
+ - Added storage configuration, fixed horizontal scaling failure, and disabled sharding ([#1450](https://github.com/apecloud/kubeblocks-addons/pull/1450)). +- **Fixes:** + - Resolved an issue where data querying from other shards for distributed tables failed ([#1411](https://github.com/apecloud/kubeblocks-addons/pull/1411)). + +### Zookeeper + +- **Improvements:** + - Replaced Java with `nc` for Zookeeper probes. + +- **Fixes:** + - Fixed the incorrect backup size issue. + - Resolved the restore failure. + - Fixed the snapshot-log mount error. + +### TiDB + +- **Improvements:** + - Added support for TiDB v8.4 ([#1275](https://github.com/apecloud/kubeblocks-addons/pull/1275)). + +## Upgrade to v0.9.3 + +Refer to [Upgrade to KubeBlocks v0.9.x](../upgrade/upgrade-to-v09-version). + +## Full Changelog + +You can check the [full changelog](https://github.com/apecloud/kubeblocks/compare/v0.9.2...v0.9.3) for additional changes. diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-09/094.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-09/094.mdx new file mode 100644 index 00000000..3c1a156f --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-09/094.mdx @@ -0,0 +1,75 @@ +--- +title: v0.9.4 +description: Release Notes v0.9.4 +keywords: [kubeblocks,release notes] +sidebar_position: 6 +--- + + +# KubeBlocks 0.9.4 (2025-06-30) + +We are delighted to announce the release of KubeBlocks v0.9.4. This release includes several new features, bug fixes, and various improvements. Here are the detailed update contents. + +## KubeBlocks + +### Features + +* **Cluster supports specifying PVC annotations and labels** By applying different annotations/labels to different types of PVC, CSI Drivers can set different IO quotas for various volumes. 
[(#8799)](https://github.com/apecloud/kubeblocks/pull/8799) + +* **Add annotation key to skip the pre-terminate action** Annotation: `apps.kubeblocks.io/skip-pre-terminate` [(#9121)](https://github.com/apecloud/kubeblocks/pull/9121) + +* **Support re-rendering configs on sharding component hscale** Re-render configs after horizontal scaling of shard components. [(#9195)](https://github.com/apecloud/kubeblocks/pull/9195) + +## KubeBlocks Addons + +### MySQL + +* Update parameter scope for `innodb_redo_log_capacity` and `secure_file_priv` [(#1510)](https://github.com/apecloud/kubeblocks-addons/pull/1510) [(#1585)](https://github.com/apecloud/kubeblocks-addons/pull/1585) + +### PostgreSQL + +* Fix missing `backupPolicy` for PostgreSQL 15 & 16 [(#1546)](https://github.com/apecloud/kubeblocks-addons/pull/1546) + +### Redis + +* Improve Redis cluster on stop-start [(#1554)](https://github.com/apecloud/kubeblocks-addons/pull/1554) +* Improve member leave logic (do not rely on announce address) [(#1548)](https://github.com/apecloud/kubeblocks-addons/pull/1548) +* Refine Redis host network variables [(#1603)](https://github.com/apecloud/kubeblocks-addons/pull/1603) + +### MongoDB + +* Add support for MongoDB exporter [(#1721)](https://github.com/apecloud/kubeblocks-addons/pull/1721) + + +### RabbitMQ + +* Fix RabbitMQ startup failure [(#1479)](https://github.com/apecloud/kubeblocks-addons/pull/1479) +* Fix RabbitMQ member leave issue [(#1657)](https://github.com/apecloud/kubeblocks-addons/pull/1657) + + +### ZooKeeper + +* Add new CMPD definition for ZooKeeper [(#1514)](https://github.com/apecloud/kubeblocks-addons/pull/1514) +* Fix snapshot log loss [(#1509)](https://github.com/apecloud/kubeblocks-addons/pull/1509) +* Add `minSessionTimeout` [(#1535)](https://github.com/apecloud/kubeblocks-addons/pull/1535) +* Fix backup and restore logic [(#1550)](https://github.com/apecloud/kubeblocks-addons/pull/1550) +* Improve ZooKeeper role probe 
[(#1542)](https://github.com/apecloud/kubeblocks-addons/pull/1542) +* Enhance ZooKeeper trace logging [(#1693)](https://github.com/apecloud/kubeblocks-addons/pull/1693) + + +### VictoriaMetrics + +* Support environment variable when creating VictoriaMetrics clusters [(#1622)](https://github.com/apecloud/kubeblocks-addons/pull/1622) + + +### ClickHouse + +* Persist log files for ClickHouse and Keeper [(#1560)](https://github.com/apecloud/kubeblocks-addons/pull/1560) + +## Upgrade to v0.9.4 + +Refer to [Upgrade to KubeBlocks v0.9.x](../../upgrade/upgrade-to-v09-version). + +## Full Changelog + +You can check the [full changelog](https://github.com/apecloud/kubeblocks/compare/v0.9.3...v0.9.4) for additional changes. \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-09/095.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-09/095.mdx new file mode 100644 index 00000000..02e0f6d7 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-09/095.mdx @@ -0,0 +1,67 @@ +--- +title: v0.9.5 +description: Release Notes v0.9.5 +keywords: [kubeblocks,release notes] +sidebar_position: 5 +--- + +# KubeBlocks 0.9.5 (2025-09-01) +We are delighted to announce the release of KubeBlocks v0.9.5. This release includes several new features, bug fixes, and various improvements. Here are the detailed update contents. + +## KubeBlocks + +### Features + +- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. [#9595](https://github.com/apecloud/kubeblocks/pull/9595) +- **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. [#9489](https://github.com/apecloud/kubeblocks/pull/9489) +- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. 
[#9607](https://github.com/apecloud/kubeblocks/pull/9607) +- **Support resize subresource in InPlacePodVerticalScaling** - Improves resource resizing handling for K8s v1.32 and above. [#9536](https://github.com/apecloud/kubeblocks/pull/9536) [#9545](https://github.com/apecloud/kubeblocks/pull/9545) +- **Add azureblob storage provider** - Adds Azure Blob storage provider support. [#9492](https://github.com/apecloud/kubeblocks/pull/9492) + +## KubeBlocks Addons + +### MySQL Variants (MySQL, GreatSQL) +- Support greatsql. [#1793](https://github.com/apecloud/kubeblocks-addons/pull/1793) +- Support mysql audit log config. [#1890](https://github.com/apecloud/kubeblocks-addons/pull/1890) +- Support greatsql auditlog config. [#1893](https://github.com/apecloud/kubeblocks-addons/pull/1893) + +### PostgreSQL +- Refactor PostgreSQL addon and support PostgreSQL 16 and 17. [#1734](https://github.com/apecloud/kubeblocks-addons/pull/1734) +- Support using etcd as DCS for PostgreSQL. [#1864](https://github.com/apecloud/kubeblocks-addons/pull/1864) + +### Redis +- Support redis 7.2.10 and redis 8 [#1812](https://github.com/apecloud/kubeblocks-addons/pull/1812) +- Fix Redis slave instance memory leak when loading redisgears module. +- Change redis maxmemory-policy to volatile-lru and maxmemory = 0.8 * limit_memory. + +### MongoDB +- Support mongodb sharding. [#1701](https://github.com/apecloud/kubeblocks-addons/pull/1701) + +### Elasticsearch +- Optimize backup. [#1853](https://github.com/apecloud/kubeblocks-addons/pull/1853) +- Support new version 8.15.5. [#1929](https://github.com/apecloud/kubeblocks-addons/pull/1929) + +### ClickHouse +- Clickhouse support backup and restore. [#1800](https://github.com/apecloud/kubeblocks-addons/pull/1800) +- Update metrics. [#1916](https://github.com/apecloud/kubeblocks-addons/pull/1916) + +### ZooKeeper +- Improve zookeeper jvm setting and gc option. 
[#1771](https://github.com/apecloud/kubeblocks-addons/pull/1771) [#1938](https://github.com/apecloud/kubeblocks-addons/pull/1938) + +### etcd +- Support etcd v3.6.1. [#1737](https://github.com/apecloud/kubeblocks-addons/pull/1737) +- Improve etcd backup procedure. [#1740](https://github.com/apecloud/kubeblocks-addons/pull/1740) +- Improve etcd params and configuration. [#1778](https://github.com/apecloud/kubeblocks-addons/pull/1778) + +### Milvus +- Support to create Milvus clusters on arm. [#1792](https://github.com/apecloud/kubeblocks-addons/pull/1792) + +## Upgrade to v0.9.5 + +Refer to [Upgrade to KubeBlocks v0.9.x](../../upgrade/upgrade-to-v09-version). + +## Full Changelog + +You can check the [full changelog](https://github.com/apecloud/kubeblocks/compare/v0.9.4...v0.9.5) for additional changes. + + diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-09/_category_.yml b/docs/en/release-1_0_1/user_docs/release_notes/release-09/_category_.yml new file mode 100644 index 00000000..4e73ebf5 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-09/_category_.yml @@ -0,0 +1,4 @@ +position: 2 +label: Release v0.9 +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-10/100-cn.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-10/100-cn.mdx new file mode 100644 index 00000000..0053618a --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-10/100-cn.mdx @@ -0,0 +1,187 @@ +--- +title: v1.0.0-cn +description: Release Notes v1.0.0 +keywords: [kubeblocks, release notes] +sidebar_position: 1 +hidden: true +--- + +# KubeBlocks 1.0.0 (2025-05-28) + +我们很高兴宣布 KubeBlocks 1.0.0 版本正式发布。 + +KubeBlocks 1.0.0 标志着项目发展的重要里程碑,核心 API 已升级至稳定版本(v1),并在集群管理、数据保护和运行稳定性方面实现重大增强,为生产环境带来更高的灵活性和可靠性。 + +## Highlights + +### APIs Graduated to Stable + +在 KubeBlocks v0.9 中引入的诸多核心能力(如灵活拓扑、InstanceSet、生命周期扩展)已在 KubeBlocks 1.0 中正式稳定。 + +以下 CRD 现在为 **`v1` 
且已稳定**,将长期支持: + +**`apps.kubeblocks.io` API 组:** + +* `ClusterDefinition` +* `Cluster` +* `ComponentDefinition` +* `Component` +* `ComponentVersion` +* `ServiceDescriptor` +* `ShardingDefinition` +* `SidecarDefinition` + +**`workloads.kubeblocks.io` API 组:** + +* `InstanceSet` + +### KubeBlocks Features + +* **滚动更新**:通过 Cluster API 支持滚动升级,最大限度减少更新期间的停机时间 +* **增量备份**:新增增量备份支持,提升性能并减少存储占用 + +### KubeBlocks Addon Highlights + +* **MySQL 改进**:新增 TLS 支持、基于 ProxySQL 的组复制和 WAL-G 实现的 PITR 功能,显著提升安全性和恢复能力 +* **MongoDB PITR 与版本支持**:为 MongoDB 引入时间点恢复功能并支持新版本 +* **Kafka 优化**:支持外部 ZooKeeper、自定义 Prometheus 指标和多网络访问,提升灵活性和可观测性 +* **Redis 增强**:新增集群切换、实例重建和外部配置支持,提高运维健壮性 + +--- + +## What's Changed + +### KubeBlocks + +#### 集群管理 + +* **滚动升级**:通过 Cluster API 实现零停机升级 [#8973](https://github.com/apecloud/kubeblocks/pull/8973) +* **动态镜像仓库**:支持动态替换镜像仓库实现更灵活的部署 [#8018](https://github.com/apecloud/kubeblocks/pull/8018) +* **分片 Pod 反亲和性**:为分片组件添加反亲和性规则 [#8705](https://github.com/apecloud/kubeblocks/pull/8705) +* **Pod 标签/注解更新**:可更新底层 Pod 标签与注解,增强运维能力 [#8571](https://github.com/apecloud/kubeblocks/pull/8571) +* **PVC 卷属性**:支持为 PVC 设置 volumeAttributesClass [#8783](https://github.com/apecloud/kubeblocks/pull/8783) +* **组件定义策略规则**:新增细粒度策略控制 [#8328](https://github.com/apecloud/kubeblocks/pull/8328) +* **组件角色重构**:改进组件管理的角色定义 [#8416](https://github.com/apecloud/kubeblocks/pull/8416) + +#### 数据保护 + +* **增量备份**:新增高效增量备份支持 [#8693](https://github.com/apecloud/kubeblocks/pull/8693) +* **备份参数一致性**:支持备份与恢复参数,确保一致性 [#8472](https://github.com/apecloud/kubeblocks/pull/8472) +* **保留最近备份**:支持保留最新备份,提升恢复能力 [#9088](https://github.com/apecloud/kubeblocks/pull/9088) + +#### 运维 + +* **OpsRequest 验证**:引入验证策略确保操作正确性 [#8232](https://github.com/apecloud/kubeblocks/pull/8232) + +--- + +### KubeBlocks Addons + +#### MySQL + +* **TLS 支持**:新增安全连接支持 [#1462](https://github.com/apecloud/kubeblocks-addons/pull/1462) +* **组复制 + ProxySQL**:支持高可用架构 [#1467](https://github.com/apecloud/kubeblocks-addons/pull/1467) +* **PITR 
恢复**:使用 WAL-G 实现时间点恢复 [#1451](https://github.com/apecloud/kubeblocks-addons/pull/1451) +* **持续与增量备份**:通过 WAL-G 改进备份策略 [#1456](https://github.com/apecloud/kubeblocks-addons/pull/1456) + +#### Redis + +* **集群切换与哨兵优化**:增强故障转移能力 [#1414](https://github.com/apecloud/kubeblocks-addons/pull/1414) +* **实例重建**:支持 Redis 实例重建 [#1417](https://github.com/apecloud/kubeblocks-addons/pull/1417) + +#### MongoDB + +* **PITR 恢复**:新增时间点恢复功能 [#1487](https://github.com/apecloud/kubeblocks-addons/pull/1487) +* **新版本支持**:新增 MongoDB 8.0.8 和 8.0.6 版本支持 [#1431](https://github.com/apecloud/kubeblocks-addons/pull/1431), [#1590](https://github.com/apecloud/kubeblocks-addons/pull/1590) + +#### Kafka + +* **外部 ZooKeeper**:为 Kafka 2.7 添加外部 ZooKeeper 支持 [#1297](https://github.com/apecloud/kubeblocks-addons/pull/1297) +* **自定义 Prometheus 指标**:支持配置自定义指标 [#1544](https://github.com/apecloud/kubeblocks-addons/pull/1544) +* **跳过端口解析**:提升使用 Pod IP 时的灵活性 [#1569](https://github.com/apecloud/kubeblocks-addons/pull/1569) +* **自定义安全上下文**:支持自定义安全设置 [#1337](https://github.com/apecloud/kubeblocks-addons/pull/1337) + +#### RabbitMQ + +* **新版本支持**:新增 RabbitMQ 4.0.9 版本支持 [#1596](https://github.com/apecloud/kubeblocks-addons/pull/1596) + +#### ClickHouse + +* **22.9.4 支持**:新增 ClickHouse 22.9.4 兼容性 [#1376](https://github.com/apecloud/kubeblocks-addons/pull/1376) + +#### TiDB + +* **8.4 版本支持**:新增 TiDB 8.4 支持 [#1384](https://github.com/apecloud/kubeblocks-addons/pull/1384) +* **升级至 6.5.12**:将 TiDB 6 更新至 v6.5.12 [#1664](https://github.com/apecloud/kubeblocks-addons/pull/1664) + +--- + +### API 更新、废弃与新增 + +#### GA 稳定版(v1)发布 + +以下 CRDs **正式升级至 `v1`**,将获得长期支持: + +**`apps.kubeblocks.io` API 组** + +- `ClusterDefinition` +- `Cluster` +- `ComponentDefinition` +- `Component` +- `ComponentVersion` +- `ServiceDescriptor` +- `ShardingDefinition` +- `SidecarDefinition` + +**`workloads.kubeblocks.io` API 组** + +- `InstanceSet` + +> 这些资源的 `v1alpha1` 和 `v1beta1` 版本现已弃用,可能在后续版本中移除。 + +#### 废弃 API + +以下 CRD 已弃用并将在**后续版本中移除**,请相应调整您的配置: + 
+* `ConfigConstraint` +* `Configuration` + +> 这些资源将不再维护或更新。 + +#### 新增 Alpha API + +新增 **`parameters.kubeblocks.io`** API 组,引入更细粒度的参数管理API: + +* `ComponentParameter` +* `ParamConfigRenderer` +* `Parameter` +* `ParametersDefinition` + +> 这些 API 旨在替代已弃用的 `ConfigConstraint` 和 `Configuration`。 + +#### API 组调整 + +部分 API 已迁移至新的分组: + +| 资源 | 原 API 组 | 新 API 组 | +| -------------------------- | ---------------- | -------------------------- | +| `OpsDefinition`/`OpsRequest` | `apps.kubeblocks.io` | `operations.kubeblocks.io` | +| `BackupPolicyTemplate` | `apps.kubeblocks.io` | `dataprotection.kubeblocks.io` | + +## 升级注意事项 + +:::note +请注意,目前不支持从 0.9 版本直接升级到 1.0 版本。 +::: + +我们正在开发一个稳健且经过测试的升级路径,将在后续版本中发布。 + +## 致谢 + +值此 KubeBlocks 1.0.0 发布之际,我们要向所有工程师、贡献者和合作伙伴致以诚挚的感谢,正是你们的努力塑造了项目的今天。 + +特别感谢快手、中国移动云、唯品会、腾讯、360 等团队, 他们的技术贡献、深入反馈和真实使用场景大大提升了项目架构、性能和生产可用性。 + +我们也衷心感谢社区贡献者——你们的代码贡献、问题报告、讨论和评审对推动项目质量和创新至关重要。 + +随着 1.0.0 的到来,我们在构建云原生数据库平台的征途上迈出了坚实一步。我们期待与大家一同继续前行,壮大生态、迎接挑战、共同推动数据基础设施的未来发展。 diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-10/100.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-10/100.mdx new file mode 100644 index 00000000..685169b9 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-10/100.mdx @@ -0,0 +1,187 @@ +--- +title: v1.0.0 +description: Release Notes v1.0.0 +keywords: [kubeblocks, release notes] +sidebar_position: 1 +--- + +# KubeBlocks 1.0.0 (2025-05-28) + +We are glad to announce the release of KubeBlocks 1.0.0. + +KubeBlocks 1.0.0 marks a significant milestone, with key APIs graduating to stable (v1) and substantial enhancements in cluster management, data protection, and operational stability—bringing greater flexibility and reliability to production environments. + +## Highlights + +### APIs Graduated to Stable + +KubeBlocks 0.9 introduced flexible topologies, advanced pod management (InstanceSet), and lifecycle hooks. Many of these features have now **graduated to stable**. 
+ +The following CRDs are now **`v1` and considered stable**, with long-term support: + +**`apps.kubeblocks.io` API Group:** + +* `ClusterDefinition` +* `Cluster` +* `ComponentDefinition` +* `Component` +* `ComponentVersion` +* `ServiceDescriptor` +* `ShardingDefinition` +* `SidecarDefinition` + +**`workloads.kubeblocks.io` API Group:** + +* `InstanceSet` + +### KubeBlocks Features + +* **Rolling Updates**: Supports rolling upgrades via the Cluster API, minimizing downtime during updates +* **Incremental Backups**: Introduces support for incremental backups to improve performance and reduce storage usage + +### KubeBlocks Addon Highlights + +* **MySQL Enhancements**: Adds TLS, group replication with ProxySQL, and PITR using WAL-G—boosting security and recovery +* **MongoDB PITR & Versioning**: Introduces Point-in-Time Recovery and new version support for MongoDB +* **Kafka Improvements**: Supports external ZooKeeper, custom Prometheus metrics, and multi-network access for better flexibility and observability +* **Redis Enhancements**: Adds cluster switchover, instance rebuild, and external config support to improve operational robustness + +--- + +## What's Changed + +### KubeBlocks + +#### Cluster Management + +* **Rolling Upgrades**: Enables zero-downtime upgrades via Cluster API [#8973](https://github.com/apecloud/kubeblocks/pull/8973) +* **Dynamic Image Registry**: Supports dynamic image registry replacement for more flexible deployments [#8018](https://github.com/apecloud/kubeblocks/pull/8018) +* **Shard Pod Anti-Affinity**: Adds anti-affinity rules for shard components [#8705](https://github.com/apecloud/kubeblocks/pull/8705) +* **Pod Metadata Updates**: Allows updating labels and annotations for underlying pods [#8571](https://github.com/apecloud/kubeblocks/pull/8571) +* **PVC Volume Attributes**: Enables setting volumeAttributesClass for PVCs [#8783](https://github.com/apecloud/kubeblocks/pull/8783) +* **PolicyRules for Component Definitions**: Adds 
granular policy control [#8328](https://github.com/apecloud/kubeblocks/pull/8328) +* **Component Role Refactoring**: Improves role definition for component management [#8416](https://github.com/apecloud/kubeblocks/pull/8416) + +#### Data Protection + +* **Incremental Backups**: Adds support for efficient, incremental backups [#8693](https://github.com/apecloud/kubeblocks/pull/8693) +* **Backup/Restore Parameters**: Ensures config consistency during backup/restore [#8472](https://github.com/apecloud/kubeblocks/pull/8472) +* **Retain Latest Backup**: Adds option to retain the latest backup [#9088](https://github.com/apecloud/kubeblocks/pull/9088) + +#### Operations + +* **OpsRequest Validation**: Introduces validation policies to enforce operation correctness [#8232](https://github.com/apecloud/kubeblocks/pull/8232) + +--- + +### KubeBlocks Addons + +#### MySQL + +* **TLS Support**: Adds TLS for secure connections [#1462](https://github.com/apecloud/kubeblocks-addons/pull/1462) +* **Group Replication with ProxySQL**: Enhances HA via ProxySQL integration [#1467](https://github.com/apecloud/kubeblocks-addons/pull/1467) +* **PITR Support**: Enables Point-in-Time Recovery using WAL-G [#1451](https://github.com/apecloud/kubeblocks-addons/pull/1451) +* **Continuous & Incremental Backups**: Improves backup strategy with WAL-G [#1456](https://github.com/apecloud/kubeblocks-addons/pull/1456) + +#### Redis + +* **Cluster Switchover & Sentinel Optimization**: Enhances failover capabilities [#1414](https://github.com/apecloud/kubeblocks-addons/pull/1414) +* **Instance Rebuild**: Adds support for rebuilding Redis instances [#1417](https://github.com/apecloud/kubeblocks-addons/pull/1417) + +#### MongoDB + +* **PITR Support**: Adds Point-in-Time Recovery [#1487](https://github.com/apecloud/kubeblocks-addons/pull/1487) +* **New Version Support**: Adds support for MongoDB versions 8.0.8 and 8.0.6 [#1431](https://github.com/apecloud/kubeblocks-addons/pull/1431), 
[#1590](https://github.com/apecloud/kubeblocks-addons/pull/1590) + +#### Kafka + +* **External ZooKeeper**: Adds external ZooKeeper support for Kafka 2.7 [#1297](https://github.com/apecloud/kubeblocks-addons/pull/1297) +* **Custom Prometheus Metrics**: Allows configuring custom metrics [#1544](https://github.com/apecloud/kubeblocks-addons/pull/1544) +* **Skip Advertise Port Parsing**: Avoids parsing when using Pod IP [#1569](https://github.com/apecloud/kubeblocks-addons/pull/1569) +* **Custom Security Context**: Adds support for custom security settings [#1337](https://github.com/apecloud/kubeblocks-addons/pull/1337) + +#### RabbitMQ + +* **New Versions**: Adds support for RabbitMQ versions 4.0.9 [#1596](https://github.com/apecloud/kubeblocks-addons/pull/1596) + +#### ClickHouse + +* **22.9.4 Support**: Adds compatibility with ClickHouse 22.9.4 [#1376](https://github.com/apecloud/kubeblocks-addons/pull/1376) + +#### TiDB + +* **8.4 Version Support**: Adds support for TiDB 8.4 [#1384](https://github.com/apecloud/kubeblocks-addons/pull/1384) +* **Upgrade to 6.5.12**: Updates TiDB 6 to v6.5.12 [#1664](https://github.com/apecloud/kubeblocks-addons/pull/1664) + +--- + +### API Graduations, Deprecations, and Removals + +#### GA (v1) API Promotions + +The following CRDs are now **promoted to `v1` and considered stable**, with long-term support: + +**`apps.kubeblocks.io` API Group** + +- `ClusterDefinition` +- `Cluster` +- `ComponentDefinition` +- `Component` +- `ComponentVersion` +- `ServiceDescriptor` +- `ShardingDefinition` +- `SidecarDefinition` + +**`workloads.kubeblocks.io` API Group** + +- `InstanceSet` + +> `v1alpha1` and `v1beta1` versions of these resources are now deprecated and may be dropped in a future release. + +#### Deprecations + +The following CRDs are deprecated and will be **removed in upcoming releases**. 
Migrate your configurations accordingly: + +* `ConfigConstraint` +* `Configuration` + +> These resources are no longer maintained or receiving updates. + +#### New Alpha APIs (Experimental) + +A new **`parameters.kubeblocks.io`** API group introduces fine-grained configuration APIs: + +* `ComponentParameter` +* `ParamConfigRenderer` +* `Parameter` +* `ParametersDefinition` + +> These are intended to replace the deprecated `ConfigConstraint` and `Configuration`. + +#### API Group Realignment + +Several APIs have moved to new groups to better reflect their domain responsibilities. Update your manifests accordingly: + +| Resource | Old API Group | New API Group | +| ------------------------------ | -------------------- | ------------------------------ | +| `OpsDefinition` / `OpsRequest` | `apps.kubeblocks.io` | `operations.kubeblocks.io` | +| `BackupPolicyTemplate` | `apps.kubeblocks.io` | `dataprotection.kubeblocks.io` | + + +## Upgrade Notice + +:::note +Please note that direct upgrades from version 0.9 to 1.0 are NOT currently supported. +::: + +We are actively developing a robust and tested upgrade path, which will be included in the upcoming release. + +## Acknowledgments + +As we celebrate the release of KubeBlocks 1.0.0, we’d like to extend our sincere thanks to all the engineers, contributors, and partners who have helped shape the project into what it is today. + +Special thanks to Kuaishou, China Mobile Cloud, Vipshop, Tencent, and 360 for their technical contributions, in-depth feedback, and real-world use cases that have significantly advanced the project’s architecture, performance, and production readiness. + +We’re also deeply grateful to our community contributors — your code contributions, issue reports, discussions, and reviews have been instrumental in driving the project forward with quality and innovation. + +With 1.0.0, we’ve reached a major milestone in building a robust, cloud-native database platform. 
We look forward to continuing this journey with all of you — growing the ecosystem, tackling new challenges, and pushing the boundaries of what’s possible in data infrastructure. \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-10/_category_.yml b/docs/en/release-1_0_1/user_docs/release_notes/release-10/_category_.yml new file mode 100644 index 00000000..4ce58fcb --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-10/_category_.yml @@ -0,0 +1,4 @@ +position: 1 +label: Release v1.0 +collapsible: true +collapsed: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/troubleshooting/_category_.yml b/docs/en/release-1_0_1/user_docs/troubleshooting/_category_.yml new file mode 100644 index 00000000..aa307195 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/troubleshooting/_category_.yml @@ -0,0 +1,4 @@ +position: 61 +label: Trouble Shooting +collapsible: true +collapsed: true diff --git a/docs/en/release-1_0_1/user_docs/troubleshooting/handle-a-cluster-exception.mdx b/docs/en/release-1_0_1/user_docs/troubleshooting/handle-a-cluster-exception.mdx new file mode 100644 index 00000000..32775105 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/troubleshooting/handle-a-cluster-exception.mdx @@ -0,0 +1,210 @@ +--- +title: FAQs +description: How to handle an exception in a cluster +keywords: [cluster exception] +sidebar_position: 1 +sidebar_label: FAQs +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# FAQs + +### List of K8s Resources created by KubeBlocks when creating a Cluster + +To get the full list of associated resources created by KubeBlocks for given cluster: + +```bash +kubectl get cmp,its,po -l app.kubernetes.io/instance= -n demo # cluster and worload +kubectl get backuppolicy,backupschedule,backup -l app.kubernetes.io/instance= -n demo # data protection resources +kubectl get componentparameter,parameter -l app.kubernetes.io/instance= -n 
demo # configuration resources +kubectl get opsrequest -l app.kubernetes.io/instance= -n demo # opsrequest resources +kubectl get svc,secret,cm,pvc -l app.kubernetes.io/instance= -n demo # k8s native resources +``` + +For troubleshooting, + +1. describe resource such as Cluster, Component, e.g. +```bash +kubectl describe TYPE NAME +``` + +2. check database instance logs +```bash +kubectl logs -c +``` + +3. check KubeBlocks logs +```bash +kubectl -n kb-system logs deployments/kubeblocks -f +``` + +### How to get the detail of each backup method + +Details of each backup method are defined in `ActionSet` in KubeBlocks. + +For example, To get the `ActionSet` which defines the behavior of backup method named `wal-g-archive` in PostgreSQL, for instance: + +```bash +kubectl -n demo get bp pg-cluster-postgresql-backup-policy -oyaml | yq '.spec.backupMethods[] | select(.name=="wal-g-archive") | .actionSetName' +``` + +ActionSet defined: + +- backup type +- both backup and restore procedures +- environment variables used in procedures + +And you may check details of each ActionSet to find out how backup and restore will be performed. + + +### How to Check Compatible versions + +Versions and it compatibility rules are embedded in `ComponentVersion` CR in KubeBlocks. +To the the list of compatible versions: + +```bash +kubectl get cmpv postgresql -ojson | jq '.spec.compatibilityRules' +``` + +
+ +Example Output + +```json +[ + { + "compDefs": [ + "postgresql-12-" + ], + "releases": [ + "12.14.0", + "12.14.1", + "12.15.0" + ] + }, + { + "compDefs": [ + "postgresql-14-" + ], + "releases": [ + "14.7.2", + "14.8.0" + ] + } +] +``` + +
+ +Releases are grouped by component definitions, and each group has a list of compatible releases. +In this example, it shows you can upgrade from version `12.14.0` to `12.14.1` or `12.15.0`, and upgrade from `14.7.2` to `14.8.0`. +But cannot upgrade from `12.14.0` to `14.8.0`. + + +### ComponentDefinition status is Unavailable + +If you made some changes to the ComponentDefinition, the status of ComponentDefinition may turn to `Unavailable`. +KubeBlocks sets the ComponentDefinition as `Unavailable` to prevent the changes from affecting existing clusters. + +By describing the ComponentDefinition, you can see following message: + +```text +Status: + Message: immutable fields can't be updated + Observed Generation: 3 + Phase: Unavailable +``` + +If the changes made are on-purpose, you can annotate the ComponentDefinition by running the following command: + +```bash +kubectl annotate componentdefinition \ apps.kubeblocks.io/skip-immutable-check\=true +``` + + +### Failed to Install KubeBlocks on K8s \<\= 1.23 + +If you are using K8s \<\= 1.23, you may encounter the following error when installing KubeBlocks: + +```bash +unknown field "x-kubernetes-validations" .... if you choose to ignore these errors, turn validation off with --validate\=false +``` + +This is because the `x-kubernetes-validations` field is not supported in K8s \<\= 1.23. 
+ +You can fix this by running the following command: + +```bash +kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/v1.0.0/kubeblocks_crds.yaml --validate\=false +``` + +### How to cancel a running OpsRequest + +KubeBlocks supports to `cancel` OpsRequest meets the following conditions: + +- The OpsRequest is in `Running` state +- The OpsRequest type is `VerticalScaling`, `HorizontalScaling` + +To cancel a running OpsRequest, you can run the following command: + +```bash +kubectl patch opsrequest -p '{"spec":{"cancel":true}}' --type=merge +``` + +### Cluster/Component stuck in `Updating` status + +If you find that a cluster/component is stuck in `Updating` status: + + +1. check if all pods are in `Running` status +2. check pod logs to see if there are any errors +3. check if each Pod has expected `roles` if required, to check the `roles` of a Pod, you can run the following command: +```bash +kubectl get po -L kubeblocks.io/role +``` +4. check for Pods, if the container status image matches the pod spec image +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: +spec: + containers: + - image: repo/image:tag # <==== image in spec + name: c1 +status: + containerStatuses: + containerID: containerd://123456 + image: repo/image:tag # <====== image in status + imageID: repo/image:tag@sha256:123456 + name: c1 +``` +If the two fields are not match, please check if there are two or more images share the same `IMAGG ID` but of different `IMAGE` tags. + +If so, please remove those images on your node and create a new Cluster. 
+ + +### Cluster stuck in `Deleting` status, and KubeBlocks logs: `has no pods to running the pre-terminate action` + +When deleting a cluster, one may find the cluster stuck in `Deleting` status, and the following error in KubeBlocks logs: + +```bash +kubectl -n kb-system logs deployments/kubeblocks -f +``` +And you may see the following error in KubeBlocks logs: + +```bash +> INFO build error: has no pods to running the pre-terminate action +``` + +This is because KubeBlocks will run the `pre-terminate` lifecycle action if defined in corresponding `ComponentDefinition`. +If there are no pods to run the pre-terminate action, the cluster will stuck in `Deleting` status until the pre-terminate action is completed. +To skip the pre-terminate action, you can annotate the Component by running the following command: + +```bash +kubectl annotate component apps.kubeblocks.io/skip-pre-terminate-action=true +``` + +This case happens when you create a cluster but for some reason, it failed to create any pod (e.g. failed to pull the image or network issue or not enough resources). \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/troubleshooting/known-issues.mdx b/docs/en/release-1_0_1/user_docs/troubleshooting/known-issues.mdx new file mode 100644 index 00000000..b08418b6 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/troubleshooting/known-issues.mdx @@ -0,0 +1,91 @@ +--- +title: Known Issues +description: Known issues in KubeBlocks +keywords: [KubeBlocks, Known Issues] +sidebar_position: 2 +sidebar_label: Known Issues +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Known Issues + +## Issue 1: KubeBlocks creates enormous number of secrets + +### Problem Description +KubeBlocks keeps creating an enormous number of secrets for each cluster and never stops. 
You may see the following information in **KubeBlocks** logs: + +```bash +INFO reconcile object *v1.ServiceAccount with action UPDATE OK +``` + +### Affected Version +- KubeBlocks v1.0.0 with Kubernetes versions ≤ 1.24 + +### Root Cause +Before Kubernetes version 1.24, Kubernetes automatically generated Secret-based tokens for ServiceAccounts, as documented in [Kubernetes Service Account Tokens](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/). + +### Solution +Upgrade KubeBlocks to `v1.0.1-beta.3` or later. + +--- + +## Issue 2: PostgreSQL fails to start with special characters in password + +### Problem Description +PostgreSQL may fail to start when the password contains certain special characters. By checking POD logs: +```bash +File "/usr/lib/python3/dist-packages/yaml/scanner.py", line 116, in check_token + self.fetch_more_tokens() + File "/usr/lib/python3/dist-packages/yaml/scanner.py", line 231, in fetch_more_tokens + return self.fetch_anchor() + File "/usr/lib/python3/dist-packages/yaml/scanner.py", line 621, in fetch_anchor + self.tokens.append(self.scan_anchor(AnchorToken)) + File "/usr/lib/python3/dist-packages/yaml/scanner.py", line 929, in scan_anchor + raise ScannerError("while scanning an %s" % name, start_mark, +yaml.scanner.ScannerError: while scanning an anchor + in "", line 45, column 17: + password: &JgE#F5x&eNwis*2dW!7& ... + ^ +``` + +### Affected Version +- KubeBlocks v0.9.4 and before +- KubeBlocks v1.0.0 + +### Solution + +Upgrade KubeBlocks to `v1.0.1-beta.6` or `v0.9.5-beta.4` or later. + +To fix this, you can explicitly set the list of symbols allowed in password generation policy. 
+ +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: +spec: + componentSpecs: + - name: postgresql + systemAccounts: + - name: postgres + passwordConfig: + length: 20 # Password length: 20 characters + numDigits: 4 # At least 4 digits + numSymbols: 2 # At least 2 symbols + letterCase: MixedCases # Uppercase and lowercase letters + symbolCharacters: '!' # set the allowed symbols when generating password +# other fields in the Cluster manifest are omitted for brevity +``` + +## How to report a bug + +To report a bug, you may + +1. create an issue in the [KubeBlocks GitHub repository](https://github.com/apecloud/kubeblocks/issues/). +2. [optional] provide the zip file generated by `kbcli report` command: +```bash +kbcli report cluster --with-logs --mask # pack cluster manifest, version, and logs +kbcli report kubeblocks --with-logs --mask # pack kubeblocks logs +``` +where `clusterName` is the name of the cluster you are reporting the bug for, and `--mask` will mask sensitive info for secrets and ConfigMap. 
diff --git a/docs/en/release-1_0_1/user_docs/upgrade/_category_.yml b/docs/en/release-1_0_1/user_docs/upgrade/_category_.yml new file mode 100644 index 00000000..da805a60 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/upgrade/_category_.yml @@ -0,0 +1,5 @@ +position: 100 +label: Upgrade KubeBlocks +collapsible: true +collapsed: true +hidden: true \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/upgrade/upgrade-to-0_8.mdx b/docs/en/release-1_0_1/user_docs/upgrade/upgrade-to-0_8.mdx new file mode 100644 index 00000000..79e90e12 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/upgrade/upgrade-to-0_8.mdx @@ -0,0 +1,115 @@ +--- +title: Upgrade to v0.8 +description: Upgrade to KubeBlocks v0.8, operation, tips and notes +keywords: [upgrade, 0.8] + +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Upgrade to KubeBlocks v0.8 + +In this tutorial, you will learn how to upgrade to KubeBlocks v0.8. + +:::note + +Execute `helm -n kb-system list | grep kubeblocks` or `kbcli version` to check the current KubeBlocks version you are running, and then upgrade it. + +::: + +## Upgrade from KubeBlocks v0.7 + + + + + +1. Set keepAddons. + + KubeBlocks v0.8 streamlines the default installed engines and separates the addons from KubeBlocks operators to KubeBlocks-Addons repo, such as greptime, influxdb, neon, oracle-mysql, orioledb, tdengine, mariadb, nebula, risingwave, starrocks, tidb, and zookeeper. To avoid deleting addon resources that are already in use during the upgrade, execute the following commands: + +- Check the current KubeBlocks version. + + ```bash + helm -n kb-system list | grep kubeblocks + ``` + +- Set the value of keepAddons as true. 
+ + ```bash + helm repo add kubeblocks https://apecloud.github.io/helm-charts + helm repo update kubeblocks + helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version \{VERSION\} --set keepAddons=true + ``` + + Replace \{VERSION\} with your current KubeBlocks version, such as 0.7.2. + +- Check addons. + + Execute the following command to ensure that the addon annotations contain `"helm.sh/resource-policy": "keep"`. + + ```bash + kubectl get addon -o json | jq '.items[] | {name: .metadata.name, annotations: .metadata.annotations}' + ``` + +2. Install CRD. + + To reduce the size of Helm chart, KubeBlocks v0.8 removes CRD from the Helm chart. Before upgrading, you need to install CRD. + + ```bash + kubectl replace -f https://github.com/apecloud/kubeblocks/releases/download/v0.8.1/kubeblocks_crds.yaml + ``` + +3. Upgrade KubeBlocks. + + ```bash + helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version 0.8.1 --set dataProtection.image.datasafed.tag=0.1.0 + ``` + +:::note + +To avoid affecting existing database clusters, when upgrading to KubeBlocks v0.8, the versions of already-installed addons will not be upgraded by default. If you want to upgrade the addons to the versions built into KubeBlocks v0.8, execute the following command. Note that this may restart existing clusters and affect availability. Please proceed with caution. + +```bash +helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version 0.8.1 --set upgradeAddons=true +``` + +::: + + + + + +1. Download kbcli v0.8. + + ```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s 0.8.1 + ``` + +2. Upgrade KubeBlocks. + + ```bash + kbcli kb upgrade --version 0.8.1 --set dataProtection.image.datasafed.tag=0.1.0 + ``` + + kbcli will automatically add the annotation `"helm.sh/resource-policy": "keep"` to ensure that existing addons are not deleted during the upgrade. 
+ + + + + +## Upgrade from KubeBlocks v0.6 + +If you are currently running KubeBlocks v0.6, please upgrade to v0.7.2 first. + +1. Download kbcli v0.7.2. + + ```shell + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s 0.7.2 + ``` + +2. Upgrade to KubeBlocks v0.7.2. + + ```shell + kbcli kb upgrade --version 0.7.2 + ``` \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/upgrade/upgrade-to-0_9_0.mdx b/docs/en/release-1_0_1/user_docs/upgrade/upgrade-to-0_9_0.mdx new file mode 100644 index 00000000..f05e0075 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/upgrade/upgrade-to-0_9_0.mdx @@ -0,0 +1,171 @@ +--- +title: Upgrade to v0.9.0 +description: Upgrade to KubeBlocks v0.9.0, operation, tips and notes +keywords: [upgrade, 0.9.0] + +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Upgrade to KubeBlocks v0.9.0 + +In this tutorial, you will learn how to upgrade to KubeBlocks v0.9.0. + +:::note + +Execute `helm -n kb-system list | grep kubeblocks` or `kbcli version` to check the current KubeBlocks version you are running, and then upgrade KubeBlocks. + +::: + +## Compatibility + +KubeBlocks 0.9.0 is compatible with KubeBlocks 0.8 APIs, but compatibility with APIs from versions prior to v0.8 is not guaranteed. If you are using Addons from KubeBlocks 0.7 or earlier (0.6, etc), DO [upgrade KubeBlocks and all Addons to v0.8 first](../upgrade/upgrade-to-0_8) to ensure service availability before upgrading to v0.9.0. + +## Upgrade from KubeBlocks v0.8 + + + + + +1. Add the `"helm.sh/resource-policy": "keep"` for Addons. + + KubeBlocks v0.8 streamlines the default installed engines. To avoid deleting Addon resources that are already in use during the upgrade, execute the following commands first. + + - Add the `"helm.sh/resource-policy": "keep"` for Addons. You can replace `-l app.kubernetes.io/name=kubeblocks` with your actual filter name. 
+ + ```bash + kubectl annotate addons.extensions.kubeblocks.io -l app.kubernetes.io/name=kubeblocks helm.sh/resource-policy=keep + ``` + + - Check Addons. + + Execute the following command to ensure that the Addon annotations contain `"helm.sh/resource-policy": "keep"`. + + ```bash + kubectl get addon -o json | jq '.items[] | {name: .metadata.name, annotations: .metadata.annotations}' + ``` + +2. Delete the incompatible OpsDefinition. + + ```bash + kubectl delete opsdefinitions.apps.kubeblocks.io kafka-quota kafka-topic kafka-user-acl switchover + ``` + +3. Install the StorageProvider CRD before the upgrade. + + If the network is slow, it's recommended to download the CRD YAML file on your localhost before further operations. + + ```bash + kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/v0.9.0/dataprotection.kubeblocks.io_storageproviders.yaml + ``` + +4. Upgrade KubeBlocks. + + ```bash + helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version 0.9.0 + ``` + + :::note + + To avoid affecting existing database clusters, when upgrading to KubeBlocks v0.9.0, the versions of already-installed Addons will not be upgraded by default. If you want to upgrade the Addons to the versions built into KubeBlocks v0.9.0, execute the following command. Note that this may restart existing clusters and affect availability. Please proceed with caution. + + ```bash + helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version 0.9.0 \ + --set upgradeAddons=true + ``` + + ::: + + + + + +1. Download kbcli v0.9.0. + + ```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s 0.9.0 + ``` + +2. Upgrade KubeBlocks. + + ```bash + kbcli kb upgrade --version 0.9.0 + ``` + + :::note + + To avoid affecting existing database clusters, when upgrading to KubeBlocks v0.9.0, the versions of already-installed Addons will not be upgraded by default. 
If you want to upgrade the Addons to the versions built into KubeBlocks v0.9.0, execute the following command. Note that this may restart existing clusters and affect availability. Please proceed with caution. + + ```bash + kbcli kb upgrade --version 0.9.0 --set upgradeAddons=true + ``` + + ::: + + kbcli will automatically add the annotation `"helm.sh/resource-policy": "keep"` to ensure that existing Addons are not deleted during the upgrade. + + + + + +## Upgrade Addons + +If you didn't specify `upgradeAddons` as `true` or your Addon is not included in the default installed Addons, you can upgrade Addons by running the commands provided below to use the v0.9.0 API. + +:::note + +If the Addon you want to upgrade is `mysql`, you need to upgrade this Addon and restart the cluster. Otherwise, the cluster created in KubeBlocks v0.8 cannot be used in v0.9.0. + +If the Addon you want to use is `clickhouse/milvus/elasticsearch/llm`, you need to upgrade KubeBlocks first and then upgrade this Addon. Otherwise, these Addons cannot be used in KubeBlocks v0.9.0 normally. 
+ +::: + + + + + +```bash +# Add Helm repo +helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + +# If github is not accessible or very slow for you, please use following repo instead +helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + +# Update helm repo +helm repo update + +# Update Addon version +helm upgrade -i {addon-release-name} kubeblocks-addons/{addon-name} --version x.y.z -n kb-system +``` + + + + + +```bash +# View the Addon index list +kbcli addon index list + +# Update one index and the default index is kubeblocks +kbcli addon index update kubeblocks + +# Search available Addon versions +kbcli addon search + +# Install an Addon +kbcli addon install --version x.y.z + +# Upgrade this Addon to a specified version +kbcli addon upgrade --version x.y.z + +# Force to upgrade to a specified version +kbcli addon upgrade --version x.y.z --force + +# View the available Addon versions +kbcli addon list | grep +``` + + + + diff --git a/docs/en/release-1_0_1/user_docs/upgrade/upgrade-to-v09-version.mdx b/docs/en/release-1_0_1/user_docs/upgrade/upgrade-to-v09-version.mdx new file mode 100644 index 00000000..4a994f64 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/upgrade/upgrade-to-v09-version.mdx @@ -0,0 +1,413 @@ +--- +title: Upgrade to v0.9.x +description: Upgrade to KubeBlocks v0.9.x, operation, tips and notes +keywords: [upgrade, kubeblocks] +sidebar_position: 1 +sidebar_label: Upgrade to v0.9.x +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +export const LATEST_VERSION = "v0.9.5"; + +{/* Custom component to render code blocks with variable substitution */} +export const VersionedCodeBlock = ({ children, language = "bash" }) => ( +
+    
+      {children.replace(/{LATEST_VERSION}/g, LATEST_VERSION)}
+    
+  
+); + +# Upgrade to KubeBlocks v0.9.x + +This guide walks you through upgrading KubeBlocks to version 0.9.x. The v0.9.x release series delivers significant improvements in performance, stability, and introduces powerful new features to enhance your database management experience. + +## Before You Begin + +### Verify Your Current Version + +First, check which version of KubeBlocks you're currently running: + +```bash +# Using helm +helm -n kb-system list | grep kubeblocks + +# Using kbcli +kbcli version +``` + +### Choose Your Upgrade Path + +Select the appropriate upgrade path based on your current version: + +| Current Version | Target: v0.9.x | Upgrade Path | +|-----------------|----------------|---------------| +| v0.9.x | ✅ Direct upgrade | Follow [Upgrade from KubeBlocks v0.9.x](#upgrade-from-kubeblocks-v09x) | +| v0.8.x | ✅ Direct upgrade | Follow [Upgrade from KubeBlocks v0.8.x](#upgrade-from-kubeblocks-v08x) | +| v0.7.x or earlier | ⚠️ Multi-step upgrade | Upgrade to v0.8.x first, then to v0.9.x | + +:::tip +We recommend upgrading to the latest stable version for optimal performance and access to the newest features. +::: + +## Upgrade from KubeBlocks v0.9.x + +This section covers upgrading between v0.9.x versions (for example, from v0.9.1 to v0.9.5). Since these are patch releases within the same major version, the upgrade process is straightforward. + +:::note +**Version Placeholder:** + +Throughout this section, replace {LATEST_VERSION} with your target version number (e.g., `v0.9.5`, `v0.9.4`). +::: + + + + + + **Step 1: Update Custom Resource Definitions (CRDs)** + + KubeBlocks separates CRDs from the Helm chart to reduce chart size. 
Start by updating the CRDs: + + +{`kubectl replace -f https://github.com/apecloud/kubeblocks/releases/download/{LATEST_VERSION}/kubeblocks_crds.yaml`} + + + **Step 2: Upgrade KubeBlocks Core** + + First, update your Helm repository to get the latest chart information: + ```bash + helm repo add kubeblocks https://apecloud.github.io/helm-charts + helm repo update kubeblocks + ``` + + Then upgrade KubeBlocks to the target version: + +{`helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version {LATEST_VERSION} --set crd.enabled=false`} + + + :::note + **Why `crd.enabled=false`?** + + Since we're upgrading within the same v0.9.x series, no API conversion is needed. Setting `crd.enabled=false` skips the CRD upgrade task for faster deployment. + ::: + + **Step 3: (Optional) Upgrade Addons** + + :::warning + **Addon Upgrade Impact:** + + Upgrading Addons may restart existing database clusters and affect availability. Only proceed during maintenance windows, or upgrade Addons individually as needed. + ::: + + To upgrade Addons to the versions bundled with the new KubeBlocks version: + + +{`helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version {LATEST_VERSION} \\ + --set upgradeAddons=true \\ + --set crd.enabled=false`} + + + + + + + **Step 1: Update kbcli** + + Download and install the matching version of kbcli: + + +{`curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s {LATEST_VERSION}`} + + + **Step 2: Upgrade KubeBlocks Core** + + Perform the core upgrade without updating Addons: + + +{`kbcli kb upgrade --version {LATEST_VERSION}`} + + + **Step 3: (Optional) Upgrade Addons** + + :::warning + **Addon Upgrade Impact:** + + Upgrading Addons may restart existing database clusters and affect availability. Only proceed during maintenance windows, or upgrade Addons individually as needed. 
+ ::: + + To upgrade Addons to the versions bundled with the new KubeBlocks version: + + +{`kbcli kb upgrade --version {LATEST_VERSION} --set upgradeAddons=true`} + + + + + + +## Upgrade from KubeBlocks v0.8.x + +This section covers upgrading from v0.8.x to v0.9.x. This is a major version upgrade that requires additional preparation steps due to API changes. + +:::note +**Key API Changes in v0.9.x:** + +- Storage provider group updated from `storage.kubeblocks.io` to `dataprotection.kubeblocks.io` +- Enhanced ConfigConstraint API with multi-version support +::: + + + + + + **Step 1: Add the `"helm.sh/resource-policy": "keep"` annotation to Addons** + + KubeBlocks v0.9 changed the default installed Addons, so you need to add the `"helm.sh/resource-policy": "keep"` annotation to Addons to avoid deleting them during the upgrade. + + ```bash + kubectl get addons -oname | xargs -I {} kubectl annotate {} helm.sh/resource-policy=keep + ``` + + **Step 2: Clean Up Incompatible Resources** + + Remove OpsDefinition that are incompatible with v0.9.x: + + ```bash + # Delete specific incompatible OpsDefinitions + kubectl delete opsdefinitions.apps.kubeblocks.io kafka-quota kafka-topic kafka-user-acl switchover + + # Alternative: Delete all OpsDefinitions (they will be recreated during upgrade) + kubectl delete opsdefinitions.apps.kubeblocks.io --all + ``` + + :::note + 1. If you have created any OpsDefinition, you need to delete them before the upgrade. + 2. If you forget to delete the OpsDefinition before upgrading CRDs, these OpsDefinitions will be INVALID and cannot be updated or deleted. You must patch and delete them before the upgrade. 
+ ```bash
+ # patch opsdef with a mock action to make it valid
+ kubectl get opsdefinitions.apps.kubeblocks.io -oname | xargs -I {} kubectl patch {} --type=merge -p '{"spec":{"actions":[{"name":"mock","exec":{"podInfoExtractorName":"test","command":["echo 1"]}}]}}'
+ # delete them
+ kubectl delete opsdefinitions.apps.kubeblocks.io --all
+ ```
+ :::
+
+ **Step 3: Update Custom Resource Definitions (CRDs)**
+
+ Install the new Storage Provider CRD required for v0.9.x and update all KubeBlocks CRDs to the new version:
+
+
+{`kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/{LATEST_VERSION}/kubeblocks_crds.yaml || kubectl replace -f https://github.com/apecloud/kubeblocks/releases/download/{LATEST_VERSION}/kubeblocks_crds.yaml`}
+
+
+ :::note
+ **Network Optimization:**
+
+ Alternatively, download the CRD YAML files locally first, then apply them using `kubectl create -f || kubectl replace -f `.
+ :::
+
+ **Step 4: Update Helm Repository and Upgrade KubeBlocks**
+
+ Update your Helm repository to get the latest chart information:
+
+ ```bash
+ helm repo add kubeblocks https://apecloud.github.io/helm-charts
+ helm repo update kubeblocks
+ ```
+
+ Enable webhooks for multi-version conversion (it will convert ConfigConstraint API to the new version):
+
+
+{`helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version {LATEST_VERSION} \\
+ --set admissionWebhooks.enabled=true \\
+ --set admissionWebhooks.ignoreReplicasCheck=true`}
+
+
+ :::note
+ **Configuration Options Explained:**
+
+ - `admissionWebhooks.enabled=true`: Enables webhook for ConfigConstraint API multi-version conversion
+ - `admissionWebhooks.ignoreReplicasCheck=true`: Allows webhook to run with single replica deployments
+ :::
+
+ **Step 5: (Optional) Upgrade Addons**
+
+ :::warning
+ **Addon Upgrade Impact:**
+
+ Upgrading Addons may restart existing database clusters and affect availability. Only proceed during maintenance windows. Or you can upgrade Addons individually.
+ ::: + + +{`helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version {LATEST_VERSION} \\ + --set upgradeAddons=true \\ + --set admissionWebhooks.enabled=true \\ + --set admissionWebhooks.ignoreReplicasCheck=true`} + + + + + + + **Step 1: Update kbcli** + + Download and install kbcli for the target version: + + +{`curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s {LATEST_VERSION}`} + + + Verify the installation: + ```bash + kbcli version + ``` + + **Step 2: Upgrade KubeBlocks with Webhook Support** + + Upgrade KubeBlocks with enhanced API validation and multi-version support: + + +{`kbcli kb upgrade --version {LATEST_VERSION} \\ + --set admissionWebhooks.enabled=true \\ + --set admissionWebhooks.ignoreReplicasCheck=true`} + + + :::note + **Configuration Options Explained:** + + - `admissionWebhooks.enabled=true`: Enables webhook for ConfigConstraint API multi-version conversion + - `admissionWebhooks.ignoreReplicasCheck=true`: Allows webhook to run with single replica deployments (useful for development environments) + ::: + + **Step 3: (Optional) Upgrade Addons** + + :::warning + **Addon Upgrade Impact:** + + Upgrading Addons may restart existing database clusters and affect availability. Only proceed during maintenance windows, or upgrade Addons individually as needed. + ::: + + To upgrade Addons along with KubeBlocks: + + +{`kbcli kb upgrade --version {LATEST_VERSION} \\ + --set upgradeAddons=true \\ + --set admissionWebhooks.enabled=true \\ + --set admissionWebhooks.ignoreReplicasCheck=true`} + + + + + + +Some Addons require special handling when upgrading from v0.8.x to v0.9.x. + +- **Required**: The `mysql` Addon must be upgraded when moving from v0.8.x to v0.9.x. Clusters created in v0.8.x will not function properly in v0.9.x without this upgrade. +- **Recommended**: For `clickhouse`, `milvus`, `elasticsearch`, and `llm` Addons, upgrade KubeBlocks first, then upgrade these Addons for optimal functionality. 
+ +## Individual Addon Upgrades + +If you chose not to upgrade Addons during the KubeBlocks upgrade, or if you need to upgrade specific Addons, follow these steps to upgrade Addons individually. + +### Upgrade Methods + + + + + + **Step 1: Configure Addon Repository** + + ```bash + # Add the primary Addon repository + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + + # Alternative repository for users in regions with limited GitHub access + # helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # Update repository information + helm repo update + ``` + + **Step 2: Find Available Versions** + + ```bash + # Search for available versions of a specific Addon + helm search repo kubeblocks-addons/{addon-name} --versions --devel + # Example: helm search repo kubeblocks-addons/mysql --versions --devel + ``` + + **Step 3: Upgrade the Addon** + + Select a version that is compatible (same major version) with your KubeBlocks release and upgrade: + + ```bash + # Upgrade to a specific version + helm upgrade -i {addon-release-name} kubeblocks-addons/{addon-name} --version x.y.z -n kb-system + ``` + + :::note + **Parameter Reference:** + - `{addon-name}`: Replace with the actual Addon name (e.g., `mysql`, `postgresql`) + - `{addon-release-name}`: The release name of the installed Addon (e.g., `kb-addon-mysql`, `kb-addon-postgresql`) + - `x.y.z`: The target version number, compatible with your KubeBlocks release + ::: + + + + + + **Step 1: Update Addon Index** + + ```bash + # List available Addon indexes (the default index is `kubeblocks`) + kbcli addon index list + + # Update the default KubeBlocks index + kbcli addon index update kubeblocks + ``` + + **Step 2: Search for Available Addon Versions** + + ```bash + # Search for available versions of a specific Addon + kbcli addon search {addon-name} + + # View currently installed Addons and their versions + kbcli addon list | grep {addon-name} + ``` + + **Step 3: 
Upgrade the Addon** + + Select a version that is compatible (same major version) with your KubeBlocks release and upgrade: + + ```bash + kbcli addon upgrade {addon-name} --version x.y.z + ``` + + **Force upgrade (use with caution):** + ```bash + kbcli addon upgrade {addon-name} --version x.y.z --force + ``` + + **Fresh installation (if not previously installed):** + ```bash + kbcli addon install {addon-name} --version x.y.z + ``` + + :::note + **When to use `--force`:** + - You need to downgrade to a previous version + - Use with caution as it bypasses compatibility checks + ::: + + + + + +## Getting Help + +If you encounter issues during the upgrade process: + +- **Check the logs**: Review upgrade logs using `kubectl -n kb-system logs deployment/kubeblocks` +- **Community Support**: Visit the [KubeBlocks GitHub Issues](https://github.com/apecloud/kubeblocks/issues) for known issues and community help \ No newline at end of file diff --git a/package.json b/package.json index 4366665a..c4c740d3 100644 --- a/package.json +++ b/package.json @@ -29,6 +29,7 @@ "@types/react-copy-to-clipboard": "^5.0.7", "@types/react-scroll": "^1.8.10", "@types/remark-heading-id": "^1.0.0", + "@types/semver": "^7.7.1", "cheerio": "^1.0.0", "fast-glob": "^3.3.3", "gray-matter": "^4.0.3", @@ -59,6 +60,7 @@ "remark-github-admonitions-to-directives": "^2.1.0", "remark-heading-id": "^1.0.1", "remark-mdx-frontmatter": "^5.2.0", + "semver": "^7.7.2", "slick-carousel": "^1.8.1", "uniqolor": "^1.1.1", "unist-util-visit": "^5.0.0", diff --git a/src/app/[locale]/docs/[version]/[category]/layout.tsx b/src/app/[locale]/docs/[version]/[category]/layout.tsx index 34850598..98162d76 100644 --- a/src/app/[locale]/docs/[version]/[category]/layout.tsx +++ b/src/app/[locale]/docs/[version]/[category]/layout.tsx @@ -11,6 +11,7 @@ import fs from 'fs'; import { setStaticParamsLocale } from 'next-international/server'; import path from 'path'; import VersionList from './version'; +import semver from 'semver'; 
export default async function DocsLayout({ children, @@ -35,6 +36,22 @@ export default async function DocsLayout({ } const versions = fs.readdirSync(path.join(DOCS_DIR, locale)); + // sort version in orders of + // 1. preview is alway the first + // 2. other versions are sorted in descending semver order + versions.sort((a, b) => { + if (a === 'preview') return -1; + if (b === 'preview') return 1; + // Use the 'semver' library for proper semver comparison + // Make sure to install 'semver' (npm install semver) + if (a === b) return 0; + // If both are valid semver, compare them + if (semver.valid(a) && semver.valid(b)) { + return semver.rcompare(a, b); // descending order + } + // Fallback to string comparison if not valid semver + return a < b ? 1 : a > b ? -1 : 0; + }); return ( <> diff --git a/src/app/[locale]/docs/[version]/[category]/version.tsx b/src/app/[locale]/docs/[version]/[category]/version.tsx index aa7d4a90..ac8ac852 100644 --- a/src/app/[locale]/docs/[version]/[category]/version.tsx +++ b/src/app/[locale]/docs/[version]/[category]/version.tsx @@ -91,7 +91,7 @@ export default function VersionList({ version, versions }: Props) { sx={{ paddingBlock: 1.2 }} > {v === version && } - {v.replace(/_/, '.')} + {v.replace(/_/g, '.')} ); })} diff --git a/src/app/api/search-index/route.ts b/src/app/api/search-index/route.ts index 78ed8428..bed44993 100644 --- a/src/app/api/search-index/route.ts +++ b/src/app/api/search-index/route.ts @@ -38,12 +38,12 @@ function getAllMdxFiles(dir: string, basePath: string = ''): string[] { return files; } -// 从文件内容中提取标题和正文 +// Extract title and main content from file content function extractContent(filePath: string): SearchDocument { const content = readFileSync(filePath, 'utf-8'); const { data, content: markdownContent } = matter(content); - // 过滤掉 mdx 的 import/export 语句和宏/JSX函数 + // Filter out import/export statements and macro/JSX functions from mdx const filteredContent = markdownContent .split('\n') .filter( @@ -54,28 
+54,28 @@ function extractContent(filePath: string): SearchDocument { !/^\{.*\}$/.test(line.trim()), ) .join('\n') - .replace(/<[^>]+>/g, '') // 去除内联 JSX 标签 - .replace(/\{[^}]+\}/g, ''); // 去除内联 JS 表达式 + .replace(/<[^>]+>/g, '') // Remove inline JSX tags + .replace(/\{[^}]+\}/g, ''); // Remove inline JS expressions - // 移除 markdown 语法,只保留纯文本 + // Remove markdown syntax, only keep plain text const plainContent = filteredContent - .replace(/```[\s\S]*?```/g, '') // 移除代码块 - .replace(/`.*?`/g, '') // 移除行内代码 - .replace(/\[([^\]]+)\]\([^)]+\)/g, '$1') // 将链接转换为纯文本 - .replace(/#{1,6}\s/g, '') // 移除标题标记 - .replace(/\*\*/g, '') // 移除加粗标记 - .replace(/\*/g, '') // 移除斜体标记 - .replace(/\n/g, ' ') // 将换行转换为空格 - .replace(/\s+/g, ' ') // 将多个空格转换为单个空格 + .replace(/```[\s\S]*?```/g, '') // Remove code blocks + .replace(/`.*?`/g, '') // Remove inline code + .replace(/\[([^\]]+)\]\([^)]+\)/g, '$1') // Convert links to plain text + .replace(/#{1,6}\s/g, '') // Remove title markers + .replace(/\*\*/g, '') // Remove bold markers + .replace(/\*/g, '') // Remove italic markers + .replace(/\n/g, ' ') // Convert line breaks to spaces + .replace(/\s+/g, ' ') // Convert multiple spaces to single space .trim(); - // 生成摘要 + // Generate summary const summary = plainContent.length > 200 ? plainContent.substring(0, 200) + '...' 
: plainContent; - // 提取关键词 + // Extract keywords const keywords = plainContent .toLowerCase() .split(/\s+/) @@ -86,7 +86,7 @@ function extractContent(filePath: string): SearchDocument { }, []) .slice(0, 20); - // 提取标题层级 + // Extract title hierarchy const headings: Array<{ level: number; text: string }> = []; const headingMatches = markdownContent.match(/^#{1,6}\s+.+$/gm); if (headingMatches) { @@ -97,7 +97,7 @@ function extractContent(filePath: string): SearchDocument { }); } - // 确定文档类型和类别 + // Determine document type and category let docType = 'documentation'; let category = 'general'; @@ -138,17 +138,17 @@ export async function GET() { try { const rootDir = process.cwd(); - // 只获取preview目录下的文档和博客文件 + // Only get documents and blog files in the preview directory const allFiles: string[] = []; - // 获取docs/en/preview下的文件 + // Get files in docs/en/preview const previewDir = join(rootDir, 'docs', 'en', 'preview'); if (existsSync(previewDir)) { const previewFiles = getAllMdxFiles(previewDir, 'docs/en/preview'); allFiles.push(...previewFiles.map((file) => join(rootDir, file))); } - // 获取blogs/en下的文件 + // Get files in blogs/en const blogsEnDir = join(rootDir, 'blogs', 'en'); if (existsSync(blogsEnDir)) { const blogFiles = getAllMdxFiles(blogsEnDir, 'blogs/en'); diff --git a/src/constants/versions.ts b/src/constants/versions.ts index d26a09f2..0069715d 100644 --- a/src/constants/versions.ts +++ b/src/constants/versions.ts @@ -8,28 +8,29 @@ export const versionOptions = [ }, { id: 'beta', - label: 'v1.0.1-beta.19', - value: 'v1.0.1-beta.19', + label: 'v1.0.1', + value: 'v1.0.1', description: 'Latest beta release', }, { id: 'previous', - label: 'v0.9.3', - value: 'v0.9.3', + label: 'v0.9.5', + value: 'v0.9.5', description: 'Previous stable release', }, ]; -// 文档路径到版本的映射 +// Document path to version mapping export const docPathToVersion: Record = { + 'preview': 'v1.0.1', 'release-1_0': 'v1.0.0', - preview: 'v1.0.1-beta.19', - 'release-0_9': 'v0.9.3', // 添加其他路径映射 + 
'release-1_0_1': 'v1.0.1', + 'release-0_9': 'v0.9.5', // Add other path mappings }; -// 从路径推断版本 +// Infer version from path export function getVersionFromPath(pathname: string): string { - // 提取文档版本路径,如 /docs/release-1_0/... -> release-1_0 + // Extract document version path, e.g. /docs/release-1_0/... -> release-1_0 const pathSegments = pathname.split('/'); const docVersionPath = pathSegments[2]; // /docs/[version]/... diff --git a/yarn.lock b/yarn.lock index 6d2a39a0..bb0f3bed 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1190,6 +1190,11 @@ dependencies: unified "^11.0.0" +"@types/semver@^7.7.1": + version "7.7.1" + resolved "https://registry.npmjs.org/@types/semver/-/semver-7.7.1.tgz#3ce3af1a5524ef327d2da9e4fd8b6d95c8d70528" + integrity sha512-FmgJfu+MOcQ370SD0ev7EI8TlCAfKYU+B4m5T3yXc1CiRN94g/SZPtsCkk506aUDtlMnFZvasDwHHUcZUEaYuA== + "@types/trusted-types@^2.0.7": version "2.0.7" resolved "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz#baccb07a970b91707df3a3e8ba6896c57ead2d11" From 05687cc7863b1cb832303d25f86b48c081cc8584 Mon Sep 17 00:00:00 2001 From: Shanshan Date: Tue, 16 Sep 2025 14:45:33 +0800 Subject: [PATCH 2/6] chore: add kb 101 release notes --- .../release_notes/release-10/100-cn.mdx | 187 ------------------ .../release_notes/release-10/100.mdx | 2 +- .../release_notes/release-10/101.mdx | 59 ++++++ .../release_notes/release-09/094.mdx | 2 +- .../release_notes/release-10/100-cn.mdx | 187 ------------------ .../release_notes/release-10/100.mdx | 4 +- .../release_notes/release-10/101.mdx | 59 ++++++ .../release_notes/release-10/100-cn.mdx | 187 ------------------ .../release_notes/release-10/100.mdx | 2 +- .../release_notes/release-10/101.mdx | 59 ++++++ 10 files changed, 182 insertions(+), 566 deletions(-) delete mode 100644 docs/en/preview/user_docs/release_notes/release-10/100-cn.mdx create mode 100644 docs/en/preview/user_docs/release_notes/release-10/101.mdx delete mode 100644 
docs/en/release-1_0/user_docs/release_notes/release-10/100-cn.mdx create mode 100644 docs/en/release-1_0/user_docs/release_notes/release-10/101.mdx delete mode 100644 docs/en/release-1_0_1/user_docs/release_notes/release-10/100-cn.mdx create mode 100644 docs/en/release-1_0_1/user_docs/release_notes/release-10/101.mdx diff --git a/docs/en/preview/user_docs/release_notes/release-10/100-cn.mdx b/docs/en/preview/user_docs/release_notes/release-10/100-cn.mdx deleted file mode 100644 index 0053618a..00000000 --- a/docs/en/preview/user_docs/release_notes/release-10/100-cn.mdx +++ /dev/null @@ -1,187 +0,0 @@ ---- -title: v1.0.0-cn -description: Release Notes v1.0.0 -keywords: [kubeblocks, release notes] -sidebar_position: 1 -hidden: true ---- - -# KubeBlocks 1.0.0 (2025-05-28) - -我们很高兴宣布 KubeBlocks 1.0.0 版本正式发布。 - -KubeBlocks 1.0.0 标志着项目发展的重要里程碑,核心 API 已升级至稳定版本(v1),并在集群管理、数据保护和运行稳定性方面实现重大增强,为生产环境带来更高的灵活性和可靠性。 - -## Highlights - -### APIs Graduated to Stable - -在 KubeBlocks v0.9 中引入的诸多核心能力(如灵活拓扑、InstanceSet、生命周期扩展)已在 KubeBlocks 1.0 中正式稳定。 - -以下 CRD 现在为 **`v1` 且已稳定**,将长期支持: - -**`apps.kubeblocks.io` API 组:** - -* `ClusterDefinition` -* `Cluster` -* `ComponentDefinition` -* `Component` -* `ComponentVersion` -* `ServiceDescriptor` -* `ShardingDefinition` -* `SidecarDefinition` - -**`workloads.kubeblocks.io` API 组:** - -* `InstanceSet` - -### KubeBlocks Features - -* **滚动更新**:通过 Cluster API 支持滚动升级,最大限度减少更新期间的停机时间 -* **增量备份**:新增增量备份支持,提升性能并减少存储占用 - -### KubeBlocks Addon Highlights - -* **MySQL 改进**:新增 TLS 支持、基于 ProxySQL 的组复制和 WAL-G 实现的 PITR 功能,显著提升安全性和恢复能力 -* **MongoDB PITR 与版本支持**:为 MongoDB 引入时间点恢复功能并支持新版本 -* **Kafka 优化**:支持外部 ZooKeeper、自定义 Prometheus 指标和多网络访问,提升灵活性和可观测性 -* **Redis 增强**:新增集群切换、实例重建和外部配置支持,提高运维健壮性 - ---- - -## What's Changed - -### KubeBlocks - -#### 集群管理 - -* **滚动升级**:通过 Cluster API 实现零停机升级 [#8973](https://github.com/apecloud/kubeblocks/pull/8973) -* **动态镜像仓库**:支持动态替换镜像仓库实现更灵活的部署 [#8018](https://github.com/apecloud/kubeblocks/pull/8018) -* **分片 Pod 
反亲和性**:为分片组件添加反亲和性规则 [#8705](https://github.com/apecloud/kubeblocks/pull/8705) -* **Pod 标签/注解更新**:可更新底层 Pod 标签与注解,增强运维能力 [#8571](https://github.com/apecloud/kubeblocks/pull/8571) -* **PVC 卷属性**:支持为 PVC 设置 volumeAttributesClass [#8783](https://github.com/apecloud/kubeblocks/pull/8783) -* **组件定义策略规则**:新增细粒度策略控制 [#8328](https://github.com/apecloud/kubeblocks/pull/8328) -* **组件角色重构**:改进组件管理的角色定义 [#8416](https://github.com/apecloud/kubeblocks/pull/8416) - -#### 数据保护 - -* **增量备份**:新增高效增量备份支持 [#8693](https://github.com/apecloud/kubeblocks/pull/8693) -* **备份参数一致性**:支持备份与恢复参数,确保一致性 [#8472](https://github.com/apecloud/kubeblocks/pull/8472) -* **保留最近备份**:支持保留最新备份,提升恢复能力 [#9088](https://github.com/apecloud/kubeblocks/pull/9088) - -#### 运维 - -* **OpsRequest 验证**:引入验证策略确保操作正确性 [#8232](https://github.com/apecloud/kubeblocks/pull/8232) - ---- - -### KubeBlocks Addons - -#### MySQL - -* **TLS 支持**:新增安全连接支持 [#1462](https://github.com/apecloud/kubeblocks-addons/pull/1462) -* **组复制 + ProxySQL**:支持高可用架构 [#1467](https://github.com/apecloud/kubeblocks-addons/pull/1467) -* **PITR 恢复**:使用 WAL-G 实现时间点恢复 [#1451](https://github.com/apecloud/kubeblocks-addons/pull/1451) -* **持续与增量备份**:通过 WAL-G 改进备份策略 [#1456](https://github.com/apecloud/kubeblocks-addons/pull/1456) - -#### Redis - -* **集群切换与哨兵优化**:增强故障转移能力 [#1414](https://github.com/apecloud/kubeblocks-addons/pull/1414) -* **实例重建**:支持 Redis 实例重建 [#1417](https://github.com/apecloud/kubeblocks-addons/pull/1417) - -#### MongoDB - -* **PITR 恢复**:新增时间点恢复功能 [#1487](https://github.com/apecloud/kubeblocks-addons/pull/1487) -* **新版本支持**:新增 MongoDB 8.0.8 和 8.0.6 版本支持 [#1431](https://github.com/apecloud/kubeblocks-addons/pull/1431), [#1590](https://github.com/apecloud/kubeblocks-addons/pull/1590) - -#### Kafka - -* **外部 ZooKeeper**:为 Kafka 2.7 添加外部 ZooKeeper 支持 [#1297](https://github.com/apecloud/kubeblocks-addons/pull/1297) -* **自定义 Prometheus 指标**:支持配置自定义指标 [#1544](https://github.com/apecloud/kubeblocks-addons/pull/1544) -* **跳过端口解析**:提升使用 Pod IP 时的灵活性 
[#1569](https://github.com/apecloud/kubeblocks-addons/pull/1569) -* **自定义安全上下文**:支持自定义安全设置 [#1337](https://github.com/apecloud/kubeblocks-addons/pull/1337) - -#### RabbitMQ - -* **新版本支持**:新增 RabbitMQ 4.0.9 版本支持 [#1596](https://github.com/apecloud/kubeblocks-addons/pull/1596) - -#### ClickHouse - -* **22.9.4 支持**:新增 ClickHouse 22.9.4 兼容性 [#1376](https://github.com/apecloud/kubeblocks-addons/pull/1376) - -#### TiDB - -* **8.4 版本支持**:新增 TiDB 8.4 支持 [#1384](https://github.com/apecloud/kubeblocks-addons/pull/1384) -* **升级至 6.5.12**:将 TiDB 6 更新至 v6.5.12 [#1664](https://github.com/apecloud/kubeblocks-addons/pull/1664) - ---- - -### API 更新、废弃与新增 - -#### GA 稳定版(v1)发布 - -以下 CRDs **正式升级至 `v1`**,将获得长期支持: - -**`apps.kubeblocks.io` API 组** - -- `ClusterDefinition` -- `Cluster` -- `ComponentDefinition` -- `Component` -- `ComponentVersion` -- `ServiceDescriptor` -- `ShardingDefinition` -- `SidecarDefinition` - -**`workloads.kubeblocks.io` API 组** - -- `InstanceSet` - -> 这些资源的 `v1alpha1` 和 `v1beta1` 版本现已弃用,可能在后续版本中移除。 - -#### 废弃 API - -以下 CRD 已弃用并将在**后续版本中移除**,请相应调整您的配置: - -* `ConfigConstraint` -* `Configuration` - -> 这些资源将不再维护或更新。 - -#### 新增 Alpha API - -新增 **`parameters.kubeblocks.io`** API 组,引入更细粒度的参数管理API: - -* `ComponentParameter` -* `ParamConfigRenderer` -* `Parameter` -* `ParametersDefinition` - -> 这些 API 旨在替代已弃用的 `ConfigConstraint` 和 `Configuration`。 - -#### API 组调整 - -部分 API 已迁移至新的分组: - -| 资源 | 原 API 组 | 新 API 组 | -| -------------------------- | ---------------- | -------------------------- | -| `OpsDefinition`/`OpsRequest` | `apps.kubeblocks.io` | `operations.kubeblocks.io` | -| `BackupPolicyTemplate` | `apps.kubeblocks.io` | `dataprotection.kubeblocks.io` | - -## 升级注意事项 - -:::note -请注意,目前不支持从 0.9 版本直接升级到 1.0 版本。 -::: - -我们正在开发一个稳健且经过测试的升级路径,将在后续版本中发布。 - -## 致谢 - -值此 KubeBlocks 1.0.0 发布之际,我们要向所有工程师、贡献者和合作伙伴致以诚挚的感谢,正是你们的努力塑造了项目的今天。 - -特别感谢快手、中国移动云、唯品会、腾讯、360 等团队, 他们的技术贡献、深入反馈和真实使用场景大大提升了项目架构、性能和生产可用性。 - -我们也衷心感谢社区贡献者——你们的代码贡献、问题报告、讨论和评审对推动项目质量和创新至关重要。 - -随着 1.0.0 
的到来,我们在构建云原生数据库平台的征途上迈出了坚实一步。我们期待与大家一同继续前行,壮大生态、迎接挑战、共同推动数据基础设施的未来发展。 diff --git a/docs/en/preview/user_docs/release_notes/release-10/100.mdx b/docs/en/preview/user_docs/release_notes/release-10/100.mdx index 685169b9..4186f660 100644 --- a/docs/en/preview/user_docs/release_notes/release-10/100.mdx +++ b/docs/en/preview/user_docs/release_notes/release-10/100.mdx @@ -2,7 +2,7 @@ title: v1.0.0 description: Release Notes v1.0.0 keywords: [kubeblocks, release notes] -sidebar_position: 1 +sidebar_position: 10 --- # KubeBlocks 1.0.0 (2025-05-28) diff --git a/docs/en/preview/user_docs/release_notes/release-10/101.mdx b/docs/en/preview/user_docs/release_notes/release-10/101.mdx new file mode 100644 index 00000000..fd306953 --- /dev/null +++ b/docs/en/preview/user_docs/release_notes/release-10/101.mdx @@ -0,0 +1,59 @@ +--- +title: v1.0.1 +description: Release Notes v1.0.1 +keywords: [kubeblocks, release notes] +sidebar_position: 9 +--- + +# KubeBlocks 1.0.1 (2025-09-12) +We are delighted to announce the release of KubeBlocks v1.0.1. This release includes several new features, bug fixes, and various improvements. Here are the detailed update contents. + +## KubeBlocks + +### Features + +- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. [#9587](https://github.com/apecloud/kubeblocks/pull/9587) +- **Support user-defined environment variables in lifecycle actions** - Allows customization of environment variables for lifecycle actions. [#9514](https://github.com/apecloud/kubeblocks/pull/9514) +- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. [#9605](https://github.com/apecloud/kubeblocks/pull/9605) +- **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. 
[#9480](https://github.com/apecloud/kubeblocks/pull/9480) +- **Support HTTP and gRPC actions** - Supports HTTP and gRPC in lifecycle actions. [#9656](https://github.com/apecloud/kubeblocks/pull/9656) +- **Scale out replicas from backup** - Supports to scale out replicas from backup in OpsRequest. [#9623](https://github.com/apecloud/kubeblocks/pull/9623) +- **Improves resource resizing handling** - Support resize subresource in InPlacePodVerticalScaling when supported. ([#9532](https://github.com/apecloud/kubeblocks/pull/9532),[#9545](https://github.com/apecloud/kubeblocks/pull/9545)) +- **Add azureblob storage provider** - Adds Azure Blob storage provider support.([#9492](https://github.com/apecloud/kubeblocks/pull/9492)) + + +## KubeBlocks Addons + +### PostgreSQL +- Improve PostgreSQL dcs mode ([#1874](https://github.com/apecloud/kubeblocks-addons/pull/1874)) +- Support synchronous mode ([#1941](https://github.com/apecloud/kubeblocks-addons/pull/1941)) +- Update patroni scope value and dcs scope ([#2005](https://github.com/apecloud/kubeblocks-addons/pull/2005),[#1997](https://github.com/apecloud/kubeblocks-addons/pull/1997)) + +### Redis +- Support new Redis verions 8.2,8.0.3, 7.2.10 and 7.4.5/([#1968](https://github.com/apecloud/kubeblocks-addons/pull/1968), [#1785](https://github.com/apecloud/kubeblocks-addons/pull/1785), [#1710](https://github.com/apecloud/kubeblocks-addons/pull/1710)) +- Fixed Redis memory leak when loading redisgears module ([#1855](https://github.com/apecloud/kubeblocks-addons/pull/1855)) +- Support reconfiguration of `client-output-buffer-limit` ([#1973](https://github.com/apecloud/kubeblocks-addons/pull/1973)) + +### Clickhouse +- Support Horizontal Scale for keeper ([#1713](https://github.com/apecloud/kubeblocks-addons/pull/1713)) + +### Elasticsearch +- Support new ES version 8.15.5 ([#1923](https://github.com/apecloud/kubeblocks-addons/pull/1923)) +- Split master and data component to be more flexible 
([#1971](https://github.com/apecloud/kubeblocks-addons/pull/1971)) + +### Etcd +- Support new Etcd version 3.6.1 ([#1709](https://github.com/apecloud/kubeblocks-addons/pull/1709)) + +### MinIO +- Add dashboard for MinIO ([#1833](https://github.com/apecloud/kubeblocks-addons/pull/1833)) +- Improve MinIO for metrics collection ([#1836](https://github.com/apecloud/kubeblocks-addons/pull/1836)) + +### RocketMQ +- Support RocketMQ Addon, for RocketMQ 4.9.6 ([#1883](https://github.com/apecloud/kubeblocks-addons/pull/1883)) + +### Neo4j +- Support Neo4j Addon, for Neo4j 4.4.42/5.26.5 ([#1649](https://github.com/apecloud/kubeblocks-addons/pull/1649)) + + +## Full Changelog +You can check the [full changelog](https://github.com/apecloud/kubeblocks/compare/v1.0.0...v1.0.1) for additional changes. \ No newline at end of file diff --git a/docs/en/release-1_0/user_docs/release_notes/release-09/094.mdx b/docs/en/release-1_0/user_docs/release_notes/release-09/094.mdx index 5eea4581..3c1a156f 100644 --- a/docs/en/release-1_0/user_docs/release_notes/release-09/094.mdx +++ b/docs/en/release-1_0/user_docs/release_notes/release-09/094.mdx @@ -68,7 +68,7 @@ We are delighted to announce the release of KubeBlocks v0.9.4. This release incl ## Upgrade to v0.9.4 -Refer to [Upgrade to KubeBlocks v0.9.x](https://kubeblocks.io/docs/release-1_0/user_docs/upgrade/upgrade-to-v09-version). +Refer to [Upgrade to KubeBlocks v0.9.x](../../upgrade/upgrade-to-v09-version). 
## Full Changelog diff --git a/docs/en/release-1_0/user_docs/release_notes/release-10/100-cn.mdx b/docs/en/release-1_0/user_docs/release_notes/release-10/100-cn.mdx deleted file mode 100644 index f25422b6..00000000 --- a/docs/en/release-1_0/user_docs/release_notes/release-10/100-cn.mdx +++ /dev/null @@ -1,187 +0,0 @@ ---- -title: v1.0.0-cn -description: Release Notes v1.0.0 -keywords: [kubeblocks,release notes] -sidebar_position: 1 -hidden: true ---- - -# KubeBlocks 1.0.0 (2025-05-28) - -我们很高兴宣布 KubeBlocks 1.0.0 版本正式发布。 - -KubeBlocks 1.0.0 标志着项目发展的重要里程碑,核心 API 已升级至稳定版本(v1),并在集群管理、数据保护和运行稳定性方面实现重大增强,为生产环境带来更高的灵活性和可靠性。 - -## Highlights - -### APIs Graduated to Stable - -在 KubeBlocks v0.9 中引入的诸多核心能力(如灵活拓扑、InstanceSet、生命周期扩展)已在 KubeBlocks 1.0 中正式稳定。 - -以下 CRD 现在为 **`v1` 且已稳定**,将长期支持: - -**`apps.kubeblocks.io` API 组:** - -* `ClusterDefinition` -* `Cluster` -* `ComponentDefinition` -* `Component` -* `ComponentVersion` -* `ServiceDescriptor` -* `ShardingDefinition` -* `SidecarDefinition` - -**`workloads.kubeblocks.io` API 组:** - -* `InstanceSet` - -### KubeBlocks Features - -* **滚动更新**:通过 Cluster API 支持滚动升级,最大限度减少更新期间的停机时间 -* **增量备份**:新增增量备份支持,提升性能并减少存储占用 - -### KubeBlocks Addon Highlights - -* **MySQL 改进**:新增 TLS 支持、基于 ProxySQL 的组复制和 WAL-G 实现的 PITR 功能,显著提升安全性和恢复能力 -* **MongoDB PITR 与版本支持**:为 MongoDB 引入时间点恢复功能并支持新版本 -* **Kafka 优化**:支持外部 ZooKeeper、自定义 Prometheus 指标和多网络访问,提升灵活性和可观测性 -* **Redis 增强**:新增集群切换、实例重建和外部配置支持,提高运维健壮性 - ---- - -## What's Changed - -### KubeBlocks - -#### 集群管理 - -* **滚动升级**:通过 Cluster API 实现零停机升级 [#8973](https://github.com/apecloud/kubeblocks/pull/8973) -* **动态镜像仓库**:支持动态替换镜像仓库实现更灵活的部署 [#8018](https://github.com/apecloud/kubeblocks/pull/8018) -* **分片 Pod 反亲和性**:为分片组件添加反亲和性规则 [#8705](https://github.com/apecloud/kubeblocks/pull/8705) -* **Pod 标签/注解更新**:可更新底层 Pod 标签与注解,增强运维能力 [#8571](https://github.com/apecloud/kubeblocks/pull/8571) -* **PVC 卷属性**:支持为 PVC 设置 volumeAttributesClass [#8783](https://github.com/apecloud/kubeblocks/pull/8783) -* 
**组件定义策略规则**:新增细粒度策略控制 [#8328](https://github.com/apecloud/kubeblocks/pull/8328) -* **组件角色重构**:改进组件管理的角色定义 [#8416](https://github.com/apecloud/kubeblocks/pull/8416) - -#### 数据保护 - -* **增量备份**:新增高效增量备份支持 [#8693](https://github.com/apecloud/kubeblocks/pull/8693) -* **备份参数一致性**:支持备份与恢复参数,确保一致性 [#8472](https://github.com/apecloud/kubeblocks/pull/8472) -* **保留最近备份**:支持保留最新备份,提升恢复能力 [#9088](https://github.com/apecloud/kubeblocks/pull/9088) - -#### 运维 - -* **OpsRequest 验证**:引入验证策略确保操作正确性 [#8232](https://github.com/apecloud/kubeblocks/pull/8232) - ---- - -### KubeBlocks Addons - -#### MySQL - -* **TLS 支持**:新增安全连接支持 [#1462](https://github.com/apecloud/kubeblocks-addons/pull/1462) -* **组复制 + ProxySQL**:支持高可用架构 [#1467](https://github.com/apecloud/kubeblocks-addons/pull/1467) -* **PITR 恢复**:使用 WAL-G 实现时间点恢复 [#1451](https://github.com/apecloud/kubeblocks-addons/pull/1451) -* **持续与增量备份**:通过 WAL-G 改进备份策略 [#1456](https://github.com/apecloud/kubeblocks-addons/pull/1456) - -#### Redis - -* **集群切换与哨兵优化**:增强故障转移能力 [#1414](https://github.com/apecloud/kubeblocks-addons/pull/1414) -* **实例重建**:支持 Redis 实例重建 [#1417](https://github.com/apecloud/kubeblocks-addons/pull/1417) - -#### MongoDB - -* **PITR 恢复**:新增时间点恢复功能 [#1487](https://github.com/apecloud/kubeblocks-addons/pull/1487) -* **新版本支持**:新增 MongoDB 8.0.8 和 8.0.6 版本支持 [#1431](https://github.com/apecloud/kubeblocks-addons/pull/1431), [#1590](https://github.com/apecloud/kubeblocks-addons/pull/1590) - -#### Kafka - -* **外部 ZooKeeper**:为 Kafka 2.7 添加外部 ZooKeeper 支持 [#1297](https://github.com/apecloud/kubeblocks-addons/pull/1297) -* **自定义 Prometheus 指标**:支持配置自定义指标 [#1544](https://github.com/apecloud/kubeblocks-addons/pull/1544) -* **跳过端口解析**:提升使用 Pod IP 时的灵活性 [#1569](https://github.com/apecloud/kubeblocks-addons/pull/1569) -* **自定义安全上下文**:支持自定义安全设置 [#1337](https://github.com/apecloud/kubeblocks-addons/pull/1337) - -#### RabbitMQ - -* **新版本支持**:新增 RabbitMQ 4.0.9 版本支持 [#1596](https://github.com/apecloud/kubeblocks-addons/pull/1596) - -#### 
ClickHouse - -* **22.9.4 支持**:新增 ClickHouse 22.9.4 兼容性 [#1376](https://github.com/apecloud/kubeblocks-addons/pull/1376) - -#### TiDB - -* **8.4 版本支持**:新增 TiDB 8.4 支持 [#1384](https://github.com/apecloud/kubeblocks-addons/pull/1384) -* **升级至 6.5.12**:将 TiDB 6 更新至 v6.5.12 [#1664](https://github.com/apecloud/kubeblocks-addons/pull/1664) - ---- - -### API 更新、废弃与新增 - -#### GA 稳定版(v1)发布 - -以下 CRDs **正式升级至 `v1`**,将获得长期支持: - -**`apps.kubeblocks.io` API 组** - -- `ClusterDefinition` -- `Cluster` -- `ComponentDefinition` -- `Component` -- `ComponentVersion` -- `ServiceDescriptor` -- `ShardingDefinition` -- `SidecarDefinition` - -**`workloads.kubeblocks.io` API 组** - -- `InstanceSet` - -> 这些资源的 `v1alpha1` 和 `v1beta1` 版本现已弃用,可能在后续版本中移除。 - -#### 废弃 API - -以下 CRD 已弃用并将在**后续版本中移除**,请相应调整您的配置: - -* `ConfigConstraint` -* `Configuration` - -> 这些资源将不再维护或更新。 - -#### 新增 Alpha API - -新增 **`parameters.kubeblocks.io`** API 组,引入更细粒度的参数管理API: - -* `ComponentParameter` -* `ParamConfigRenderer` -* `Parameter` -* `ParametersDefinition` - -> 这些 API 旨在替代已弃用的 `ConfigConstraint` 和 `Configuration`。 - -#### API 组调整 - -部分 API 已迁移至新的分组: - -| 资源 | 原 API 组 | 新 API 组 | -| -------------------------- | ---------------- | -------------------------- | -| `OpsDefinition`/`OpsRequest` | `apps.kubeblocks.io` | `operations.kubeblocks.io` | -| `BackupPolicyTemplate` | `apps.kubeblocks.io` | `dataprotection.kubeblocks.io` | - -## 升级注意事项 - -:::note -请注意,目前不支持从 0.9 版本直接升级到 1.0 版本。 -::: - -我们正在开发一个稳健且经过测试的升级路径,将在后续版本中发布。 - -## 致谢 - -值此 KubeBlocks 1.0.0 发布之际,我们要向所有工程师、贡献者和合作伙伴致以诚挚的感谢,正是你们的努力塑造了项目的今天。 - -特别感谢快手、中国移动云、唯品会、腾讯、360 等团队, 他们的技术贡献、深入反馈和真实使用场景大大提升了项目架构、性能和生产可用性。 - -我们也衷心感谢社区贡献者——你们的代码贡献、问题报告、讨论和评审对推动项目质量和创新至关重要。 - -随着 1.0.0 的到来,我们在构建云原生数据库平台的征途上迈出了坚实一步。我们期待与大家一同继续前行,壮大生态、迎接挑战、共同推动数据基础设施的未来发展。 diff --git a/docs/en/release-1_0/user_docs/release_notes/release-10/100.mdx b/docs/en/release-1_0/user_docs/release_notes/release-10/100.mdx index 62942fa9..4186f660 100644 --- 
a/docs/en/release-1_0/user_docs/release_notes/release-10/100.mdx +++ b/docs/en/release-1_0/user_docs/release_notes/release-10/100.mdx @@ -1,8 +1,8 @@ --- title: v1.0.0 description: Release Notes v1.0.0 -keywords: [kubeblocks,release notes] -sidebar_position: 1 +keywords: [kubeblocks, release notes] +sidebar_position: 10 --- # KubeBlocks 1.0.0 (2025-05-28) diff --git a/docs/en/release-1_0/user_docs/release_notes/release-10/101.mdx b/docs/en/release-1_0/user_docs/release_notes/release-10/101.mdx new file mode 100644 index 00000000..fd306953 --- /dev/null +++ b/docs/en/release-1_0/user_docs/release_notes/release-10/101.mdx @@ -0,0 +1,59 @@ +--- +title: v1.0.1 +description: Release Notes v1.0.1 +keywords: [kubeblocks, release notes] +sidebar_position: 9 +--- + +# KubeBlocks 1.0.1 (2025-09-12) +We are delighted to announce the release of KubeBlocks v1.0.1. This release includes several new features, bug fixes, and various improvements. Here are the detailed update contents. + +## KubeBlocks + +### Features + +- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. [#9587](https://github.com/apecloud/kubeblocks/pull/9587) +- **Support user-defined environment variables in lifecycle actions** - Allows customization of environment variables for lifecycle actions. [#9514](https://github.com/apecloud/kubeblocks/pull/9514) +- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. [#9605](https://github.com/apecloud/kubeblocks/pull/9605) +- **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. [#9480](https://github.com/apecloud/kubeblocks/pull/9480) +- **Support HTTP and gRPC actions** - Supports HTTP and gRPC in lifecycle actions. 
[#9656](https://github.com/apecloud/kubeblocks/pull/9656) +- **Scale out replicas from backup** - Supports to scale out replicas from backup in OpsRequest. [#9623](https://github.com/apecloud/kubeblocks/pull/9623) +- **Improves resource resizing handling** - Support resize subresource in InPlacePodVerticalScaling when supported. ([#9532](https://github.com/apecloud/kubeblocks/pull/9532),[#9545](https://github.com/apecloud/kubeblocks/pull/9545)) +- **Add azureblob storage provider** - Adds Azure Blob storage provider support.([#9492](https://github.com/apecloud/kubeblocks/pull/9492)) + + +## KubeBlocks Addons + +### PostgreSQL +- Improve PostgreSQL dcs mode ([#1874](https://github.com/apecloud/kubeblocks-addons/pull/1874)) +- Support synchronous mode ([#1941](https://github.com/apecloud/kubeblocks-addons/pull/1941)) +- Update patroni scope value and dcs scope ([#2005](https://github.com/apecloud/kubeblocks-addons/pull/2005),[#1997](https://github.com/apecloud/kubeblocks-addons/pull/1997)) + +### Redis +- Support new Redis verions 8.2,8.0.3, 7.2.10 and 7.4.5/([#1968](https://github.com/apecloud/kubeblocks-addons/pull/1968), [#1785](https://github.com/apecloud/kubeblocks-addons/pull/1785), [#1710](https://github.com/apecloud/kubeblocks-addons/pull/1710)) +- Fixed Redis memory leak when loading redisgears module ([#1855](https://github.com/apecloud/kubeblocks-addons/pull/1855)) +- Support reconfiguration of `client-output-buffer-limit` ([#1973](https://github.com/apecloud/kubeblocks-addons/pull/1973)) + +### Clickhouse +- Support Horizontal Scale for keeper ([#1713](https://github.com/apecloud/kubeblocks-addons/pull/1713)) + +### Elasticsearch +- Support new ES version 8.15.5 ([#1923](https://github.com/apecloud/kubeblocks-addons/pull/1923)) +- Split master and data component to be more flexible ([#1971](https://github.com/apecloud/kubeblocks-addons/pull/1971)) + +### Etcd +- Support new Etcd version 3.6.1 
([#1709](https://github.com/apecloud/kubeblocks-addons/pull/1709)) + +### MinIO +- Add dashboard for MinIO ([#1833](https://github.com/apecloud/kubeblocks-addons/pull/1833)) +- Improve MinIO for metrics collection ([#1836](https://github.com/apecloud/kubeblocks-addons/pull/1836)) + +### RocketMQ +- Support RocketMQ Addon, for RocketMQ 4.9.6 ([#1883](https://github.com/apecloud/kubeblocks-addons/pull/1883)) + +### Neo4j +- Support Neo4j Addon, for Neo4j 4.4.42/5.26.5 ([#1649](https://github.com/apecloud/kubeblocks-addons/pull/1649)) + + +## Full Changelog +You can check the [full changelog](https://github.com/apecloud/kubeblocks/compare/v1.0.0...v1.0.1) for additional changes. \ No newline at end of file diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-10/100-cn.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-10/100-cn.mdx deleted file mode 100644 index 0053618a..00000000 --- a/docs/en/release-1_0_1/user_docs/release_notes/release-10/100-cn.mdx +++ /dev/null @@ -1,187 +0,0 @@ ---- -title: v1.0.0-cn -description: Release Notes v1.0.0 -keywords: [kubeblocks, release notes] -sidebar_position: 1 -hidden: true ---- - -# KubeBlocks 1.0.0 (2025-05-28) - -我们很高兴宣布 KubeBlocks 1.0.0 版本正式发布。 - -KubeBlocks 1.0.0 标志着项目发展的重要里程碑,核心 API 已升级至稳定版本(v1),并在集群管理、数据保护和运行稳定性方面实现重大增强,为生产环境带来更高的灵活性和可靠性。 - -## Highlights - -### APIs Graduated to Stable - -在 KubeBlocks v0.9 中引入的诸多核心能力(如灵活拓扑、InstanceSet、生命周期扩展)已在 KubeBlocks 1.0 中正式稳定。 - -以下 CRD 现在为 **`v1` 且已稳定**,将长期支持: - -**`apps.kubeblocks.io` API 组:** - -* `ClusterDefinition` -* `Cluster` -* `ComponentDefinition` -* `Component` -* `ComponentVersion` -* `ServiceDescriptor` -* `ShardingDefinition` -* `SidecarDefinition` - -**`workloads.kubeblocks.io` API 组:** - -* `InstanceSet` - -### KubeBlocks Features - -* **滚动更新**:通过 Cluster API 支持滚动升级,最大限度减少更新期间的停机时间 -* **增量备份**:新增增量备份支持,提升性能并减少存储占用 - -### KubeBlocks Addon Highlights - -* **MySQL 改进**:新增 TLS 支持、基于 ProxySQL 的组复制和 WAL-G 实现的 PITR 功能,显著提升安全性和恢复能力 -* **MongoDB PITR 
与版本支持**:为 MongoDB 引入时间点恢复功能并支持新版本 -* **Kafka 优化**:支持外部 ZooKeeper、自定义 Prometheus 指标和多网络访问,提升灵活性和可观测性 -* **Redis 增强**:新增集群切换、实例重建和外部配置支持,提高运维健壮性 - ---- - -## What's Changed - -### KubeBlocks - -#### 集群管理 - -* **滚动升级**:通过 Cluster API 实现零停机升级 [#8973](https://github.com/apecloud/kubeblocks/pull/8973) -* **动态镜像仓库**:支持动态替换镜像仓库实现更灵活的部署 [#8018](https://github.com/apecloud/kubeblocks/pull/8018) -* **分片 Pod 反亲和性**:为分片组件添加反亲和性规则 [#8705](https://github.com/apecloud/kubeblocks/pull/8705) -* **Pod 标签/注解更新**:可更新底层 Pod 标签与注解,增强运维能力 [#8571](https://github.com/apecloud/kubeblocks/pull/8571) -* **PVC 卷属性**:支持为 PVC 设置 volumeAttributesClass [#8783](https://github.com/apecloud/kubeblocks/pull/8783) -* **组件定义策略规则**:新增细粒度策略控制 [#8328](https://github.com/apecloud/kubeblocks/pull/8328) -* **组件角色重构**:改进组件管理的角色定义 [#8416](https://github.com/apecloud/kubeblocks/pull/8416) - -#### 数据保护 - -* **增量备份**:新增高效增量备份支持 [#8693](https://github.com/apecloud/kubeblocks/pull/8693) -* **备份参数一致性**:支持备份与恢复参数,确保一致性 [#8472](https://github.com/apecloud/kubeblocks/pull/8472) -* **保留最近备份**:支持保留最新备份,提升恢复能力 [#9088](https://github.com/apecloud/kubeblocks/pull/9088) - -#### 运维 - -* **OpsRequest 验证**:引入验证策略确保操作正确性 [#8232](https://github.com/apecloud/kubeblocks/pull/8232) - ---- - -### KubeBlocks Addons - -#### MySQL - -* **TLS 支持**:新增安全连接支持 [#1462](https://github.com/apecloud/kubeblocks-addons/pull/1462) -* **组复制 + ProxySQL**:支持高可用架构 [#1467](https://github.com/apecloud/kubeblocks-addons/pull/1467) -* **PITR 恢复**:使用 WAL-G 实现时间点恢复 [#1451](https://github.com/apecloud/kubeblocks-addons/pull/1451) -* **持续与增量备份**:通过 WAL-G 改进备份策略 [#1456](https://github.com/apecloud/kubeblocks-addons/pull/1456) - -#### Redis - -* **集群切换与哨兵优化**:增强故障转移能力 [#1414](https://github.com/apecloud/kubeblocks-addons/pull/1414) -* **实例重建**:支持 Redis 实例重建 [#1417](https://github.com/apecloud/kubeblocks-addons/pull/1417) - -#### MongoDB - -* **PITR 恢复**:新增时间点恢复功能 [#1487](https://github.com/apecloud/kubeblocks-addons/pull/1487) -* **新版本支持**:新增 MongoDB 8.0.8 和 8.0.6 
版本支持 [#1431](https://github.com/apecloud/kubeblocks-addons/pull/1431), [#1590](https://github.com/apecloud/kubeblocks-addons/pull/1590) - -#### Kafka - -* **外部 ZooKeeper**:为 Kafka 2.7 添加外部 ZooKeeper 支持 [#1297](https://github.com/apecloud/kubeblocks-addons/pull/1297) -* **自定义 Prometheus 指标**:支持配置自定义指标 [#1544](https://github.com/apecloud/kubeblocks-addons/pull/1544) -* **跳过端口解析**:提升使用 Pod IP 时的灵活性 [#1569](https://github.com/apecloud/kubeblocks-addons/pull/1569) -* **自定义安全上下文**:支持自定义安全设置 [#1337](https://github.com/apecloud/kubeblocks-addons/pull/1337) - -#### RabbitMQ - -* **新版本支持**:新增 RabbitMQ 4.0.9 版本支持 [#1596](https://github.com/apecloud/kubeblocks-addons/pull/1596) - -#### ClickHouse - -* **22.9.4 支持**:新增 ClickHouse 22.9.4 兼容性 [#1376](https://github.com/apecloud/kubeblocks-addons/pull/1376) - -#### TiDB - -* **8.4 版本支持**:新增 TiDB 8.4 支持 [#1384](https://github.com/apecloud/kubeblocks-addons/pull/1384) -* **升级至 6.5.12**:将 TiDB 6 更新至 v6.5.12 [#1664](https://github.com/apecloud/kubeblocks-addons/pull/1664) - ---- - -### API 更新、废弃与新增 - -#### GA 稳定版(v1)发布 - -以下 CRDs **正式升级至 `v1`**,将获得长期支持: - -**`apps.kubeblocks.io` API 组** - -- `ClusterDefinition` -- `Cluster` -- `ComponentDefinition` -- `Component` -- `ComponentVersion` -- `ServiceDescriptor` -- `ShardingDefinition` -- `SidecarDefinition` - -**`workloads.kubeblocks.io` API 组** - -- `InstanceSet` - -> 这些资源的 `v1alpha1` 和 `v1beta1` 版本现已弃用,可能在后续版本中移除。 - -#### 废弃 API - -以下 CRD 已弃用并将在**后续版本中移除**,请相应调整您的配置: - -* `ConfigConstraint` -* `Configuration` - -> 这些资源将不再维护或更新。 - -#### 新增 Alpha API - -新增 **`parameters.kubeblocks.io`** API 组,引入更细粒度的参数管理API: - -* `ComponentParameter` -* `ParamConfigRenderer` -* `Parameter` -* `ParametersDefinition` - -> 这些 API 旨在替代已弃用的 `ConfigConstraint` 和 `Configuration`。 - -#### API 组调整 - -部分 API 已迁移至新的分组: - -| 资源 | 原 API 组 | 新 API 组 | -| -------------------------- | ---------------- | -------------------------- | -| `OpsDefinition`/`OpsRequest` | `apps.kubeblocks.io` | `operations.kubeblocks.io` | -| 
`BackupPolicyTemplate` | `apps.kubeblocks.io` | `dataprotection.kubeblocks.io` | - -## 升级注意事项 - -:::note -请注意,目前不支持从 0.9 版本直接升级到 1.0 版本。 -::: - -我们正在开发一个稳健且经过测试的升级路径,将在后续版本中发布。 - -## 致谢 - -值此 KubeBlocks 1.0.0 发布之际,我们要向所有工程师、贡献者和合作伙伴致以诚挚的感谢,正是你们的努力塑造了项目的今天。 - -特别感谢快手、中国移动云、唯品会、腾讯、360 等团队, 他们的技术贡献、深入反馈和真实使用场景大大提升了项目架构、性能和生产可用性。 - -我们也衷心感谢社区贡献者——你们的代码贡献、问题报告、讨论和评审对推动项目质量和创新至关重要。 - -随着 1.0.0 的到来,我们在构建云原生数据库平台的征途上迈出了坚实一步。我们期待与大家一同继续前行,壮大生态、迎接挑战、共同推动数据基础设施的未来发展。 diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-10/100.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-10/100.mdx index 685169b9..4186f660 100644 --- a/docs/en/release-1_0_1/user_docs/release_notes/release-10/100.mdx +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-10/100.mdx @@ -2,7 +2,7 @@ title: v1.0.0 description: Release Notes v1.0.0 keywords: [kubeblocks, release notes] -sidebar_position: 1 +sidebar_position: 10 --- # KubeBlocks 1.0.0 (2025-05-28) diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-10/101.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-10/101.mdx new file mode 100644 index 00000000..aa8ccc20 --- /dev/null +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-10/101.mdx @@ -0,0 +1,59 @@ +--- +title: v1.0.1 +description: Release Notes v1.0.1 +keywords: [kubeblocks, release notes] +sidebar_position: 9 +--- + +# KubeBlocks 1.0.1 (2025-09-12) +We are delighted to announce the release of KubeBlocks v1.0.1. This release includes several new features, bug fixes, and various improvements. Here are the detailed update contents. + +## KubeBlocks + +### Features + +- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. [#9587](https://github.com/apecloud/kubeblocks/pull/9587) +- **Support user-defined environment variables in lifecycle actions** - Allows customization of environment variables for lifecycle actions. 
[#9514](https://github.com/apecloud/kubeblocks/pull/9514) +- **OpsDefinition supports image mapping for multiple service versions** - Enables image mapping for different service versions in operations definitions. [#9605](https://github.com/apecloud/kubeblocks/pull/9605) +- **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. [#9480](https://github.com/apecloud/kubeblocks/pull/9480) +- **Support HTTP and gRPC actions** - Supports HTTP and gRPC in lifecycle actions. [#9656](https://github.com/apecloud/kubeblocks/pull/9656) +- **Scale out replicas from backup** - Supports scaling out replicas from backup in OpsRequest. [#9623](https://github.com/apecloud/kubeblocks/pull/9623) +- **Improve resource resizing handling** - Supports the resize subresource when InPlacePodVerticalScaling is supported. ([#9532](https://github.com/apecloud/kubeblocks/pull/9532), [#9545](https://github.com/apecloud/kubeblocks/pull/9545)) +- **Add azureblob storage provider** - Adds Azure Blob storage provider support. ([#9492](https://github.com/apecloud/kubeblocks/pull/9492)) + + +## KubeBlocks Addons + +### PostgreSQL +- Improve PostgreSQL dcs mode ([#1874](https://github.com/apecloud/kubeblocks-addons/pull/1874)) +- Support synchronous mode ([#1941](https://github.com/apecloud/kubeblocks-addons/pull/1941)) +- Update patroni scope value and dcs scope ([#2005](https://github.com/apecloud/kubeblocks-addons/pull/2005), [#1997](https://github.com/apecloud/kubeblocks-addons/pull/1997)) + +### Redis +- Support new Redis versions 8.2, 8.0.3, 7.2.10 and 7.4.5 ([#1968](https://github.com/apecloud/kubeblocks-addons/pull/1968), [#1785](https://github.com/apecloud/kubeblocks-addons/pull/1785), [#1710](https://github.com/apecloud/kubeblocks-addons/pull/1710)) +- Fixed Redis memory leak when loading redisgears module ([#1855](https://github.com/apecloud/kubeblocks-addons/pull/1855)) +- Support reconfiguration on Redis cluster for client-output-buffer-limit 
([#1973](https://github.com/apecloud/kubeblocks-addons/pull/1973)) + +### ClickHouse +- Support horizontal scaling for Keeper ([#1713](https://github.com/apecloud/kubeblocks-addons/pull/1713)) + +### Elasticsearch +- Support new ES version 8.15.5 ([#1923](https://github.com/apecloud/kubeblocks-addons/pull/1923)) +- Split master and data components to be more flexible ([#1971](https://github.com/apecloud/kubeblocks-addons/pull/1971)) + +### Etcd +- Support new Etcd version 3.6.1 ([#1709](https://github.com/apecloud/kubeblocks-addons/pull/1709)) + +### MinIO +- Add dashboard for MinIO ([#1833](https://github.com/apecloud/kubeblocks-addons/pull/1833)) +- Improve MinIO for metrics collection ([#1836](https://github.com/apecloud/kubeblocks-addons/pull/1836)) + +### RocketMQ +- Support RocketMQ Addon for RocketMQ 4.9.6 ([#1883](https://github.com/apecloud/kubeblocks-addons/pull/1883)) + +### Neo4j +- Support Neo4j Addon for Neo4j 4.4.42/5.26.5 ([#1649](https://github.com/apecloud/kubeblocks-addons/pull/1649)) + + +## Full Changelog +You can check the [full changelog](https://github.com/apecloud/kubeblocks/compare/v1.0.0...v1.0.1) for additional changes. 
\ No newline at end of file From 15494fa17b2394e4b74df5714839da8103237e72 Mon Sep 17 00:00:00 2001 From: Shanshan Date: Tue, 16 Sep 2025 16:15:39 +0800 Subject: [PATCH 3/6] update kbcli docs --- docs/en/preview/cli/_category_.yml | 5 -- .../preview/cli/kbcli_cluster_create_etcd.mdx | 2 + .../cli/kbcli_cluster_create_kafka.mdx | 5 +- .../cli/kbcli_cluster_create_mongodb.mdx | 5 +- .../cli/kbcli_cluster_create_mysql.mdx | 6 +- .../preview/cli/kbcli_cluster_custom-ops.mdx | 9 +++ .../kbcli_cluster_custom-ops_kafka-quota.mdx | 67 +++++++++++++++++ .../kbcli_cluster_custom-ops_kafka-topic.mdx | 74 +++++++++++++++++++ ...bcli_cluster_custom-ops_kafka-user-acl.mdx | 73 ++++++++++++++++++ ...er_custom-ops_pg-update-standby-config.mdx | 64 ++++++++++++++++ ...er_custom-ops_redis-cluster-rebalance.mdx} | 22 ++++-- ...er_custom-ops_redis-master-account-ops.mdx | 67 +++++++++++++++++ ...cluster_custom-ops_redis-reset-master.mdx} | 27 ++++--- ..._custom-ops_redis-sentinel-account-ops.mdx | 67 +++++++++++++++++ ...ter_custom-ops_redis-shard-account-ops.mdx | 69 +++++++++++++++++ docs/en/preview/cli/kbcli_playground_init.mdx | 3 - docs/en/release-1_0_1/cli/_category_.yml | 5 -- .../cli/kbcli_cluster_create_etcd.mdx | 2 + .../cli/kbcli_cluster_create_kafka.mdx | 5 +- .../cli/kbcli_cluster_create_mongodb.mdx | 5 +- .../cli/kbcli_cluster_create_mysql.mdx | 6 +- .../cli/kbcli_cluster_custom-ops.mdx | 9 +++ .../kbcli_cluster_custom-ops_kafka-quota.mdx | 67 +++++++++++++++++ .../kbcli_cluster_custom-ops_kafka-topic.mdx | 74 +++++++++++++++++++ ...bcli_cluster_custom-ops_kafka-user-acl.mdx | 73 ++++++++++++++++++ ...er_custom-ops_pg-update-standby-config.mdx | 64 ++++++++++++++++ ...er_custom-ops_redis-cluster-rebalance.mdx} | 22 ++++-- ...er_custom-ops_redis-master-account-ops.mdx | 67 +++++++++++++++++ ...cluster_custom-ops_redis-reset-master.mdx} | 27 ++++--- ..._custom-ops_redis-sentinel-account-ops.mdx | 67 +++++++++++++++++ ...ter_custom-ops_redis-shard-account-ops.mdx | 69 
+++++++++++++++++ .../cli/kbcli_playground_init.mdx | 3 - scripts/sync-kbcli-docs.sh | 2 +- 33 files changed, 1063 insertions(+), 69 deletions(-) delete mode 100644 docs/en/preview/cli/_category_.yml create mode 100644 docs/en/preview/cli/kbcli_cluster_custom-ops_kafka-quota.mdx create mode 100644 docs/en/preview/cli/kbcli_cluster_custom-ops_kafka-topic.mdx create mode 100644 docs/en/preview/cli/kbcli_cluster_custom-ops_kafka-user-acl.mdx create mode 100644 docs/en/preview/cli/kbcli_cluster_custom-ops_pg-update-standby-config.mdx rename docs/en/preview/cli/{kbcli_cluster_diff-config.mdx => kbcli_cluster_custom-ops_redis-cluster-rebalance.mdx} (60%) create mode 100644 docs/en/preview/cli/kbcli_cluster_custom-ops_redis-master-account-ops.mdx rename docs/en/{release-1_0_1/cli/kbcli_cluster_convert-to-v1.mdx => preview/cli/kbcli_cluster_custom-ops_redis-reset-master.mdx} (60%) create mode 100644 docs/en/preview/cli/kbcli_cluster_custom-ops_redis-sentinel-account-ops.mdx create mode 100644 docs/en/preview/cli/kbcli_cluster_custom-ops_redis-shard-account-ops.mdx delete mode 100644 docs/en/release-1_0_1/cli/_category_.yml create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_kafka-quota.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_kafka-topic.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_kafka-user-acl.mdx create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_pg-update-standby-config.mdx rename docs/en/release-1_0_1/cli/{kbcli_cluster_diff-config.mdx => kbcli_cluster_custom-ops_redis-cluster-rebalance.mdx} (60%) create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-master-account-ops.mdx rename docs/en/{preview/cli/kbcli_cluster_convert-to-v1.mdx => release-1_0_1/cli/kbcli_cluster_custom-ops_redis-reset-master.mdx} (60%) create mode 100644 docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-sentinel-account-ops.mdx create mode 100644 
docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-shard-account-ops.mdx diff --git a/docs/en/preview/cli/_category_.yml b/docs/en/preview/cli/_category_.yml deleted file mode 100644 index dff53aca..00000000 --- a/docs/en/preview/cli/_category_.yml +++ /dev/null @@ -1,5 +0,0 @@ -position: 30 -label: Command Line -collapsible: true -collapsed: true -className: hide-children \ No newline at end of file diff --git a/docs/en/preview/cli/kbcli_cluster_create_etcd.mdx b/docs/en/preview/cli/kbcli_cluster_create_etcd.mdx index 4ea3d7e6..22729a58 100644 --- a/docs/en/preview/cli/kbcli_cluster_create_etcd.mdx +++ b/docs/en/preview/cli/kbcli_cluster_create_etcd.mdx @@ -35,11 +35,13 @@ kbcli cluster create etcd NAME [flags] --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") --replicas int The number of replicas, the default replicas is 3. Value range [1, 5]. (default 3) --storage float Data Storage size, the unit is Gi. Value range [1, 10000]. (default 10) + --storage-class-name string The name of the StorageClass to use for data storage. --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") --tls-enable Enable TLS for etcd cluster --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' --topology-keys stringArray Topology keys for affinity + --version string etcd Service Version. 
(default "3.6.1") ``` ### Options inherited from parent commands diff --git a/docs/en/preview/cli/kbcli_cluster_create_kafka.mdx b/docs/en/preview/cli/kbcli_cluster_create_kafka.mdx index 49980c8e..b3e3ebef 100644 --- a/docs/en/preview/cli/kbcli_cluster_create_kafka.mdx +++ b/docs/en/preview/cli/kbcli_cluster_create_kafka.mdx @@ -34,11 +34,11 @@ kbcli cluster create kafka NAME [flags] --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) --meta-storage float Metadata Storage size, the unit is Gi. Value range [1, 10000]. (default 5) --meta-storage-class string The StorageClass for Kafka Metadata Storage. - --mode string Mode for Kafka kraft cluster, 'combined' is combined Kafka controller and broker,'separated' is broker and controller running independently. Legal values [combined, separated]. (default "combined") + --mode string Mode for Kafka kraft cluster, 'combined' is combined Kafka controller and broker,'separated' is broker and controller running independently. Legal values [combined, separated, withZookeeper-10]. (default "combined") --monitor-enable Enable monitor for Kafka. (default true) --monitor.limit.cpu float (default 0.5) --monitor.limit.memory float (default 1) - --monitor.replicas int Number of replicas for the monitor component. Value range [1]. (default 1) + --monitor.replicas float Number of replicas for the monitor component. Value range [1]. (default 1) --monitor.request.cpu float (default 0.1) --monitor.request.memory float (default 0.2) --node-labels stringToString Node label selector (default []) @@ -47,6 +47,7 @@ kbcli cluster create kafka NAME [flags] --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") --replicas int The number of Kafka broker replicas for combined mode. Legal values [1, 3, 5]. (default 1) --sasl-enable Enable authentication using SASL/PLAIN for Kafka. + --sasl-scram-enable Enable authentication using SASL/SCRAM for Kafka. 
--storage float Data Storage size, the unit is Gi. Value range [1, 10000]. (default 10) --storage-class string The StorageClass for Kafka Data Storage. --storage-enable Enable storage for Kafka. diff --git a/docs/en/preview/cli/kbcli_cluster_create_mongodb.mdx b/docs/en/preview/cli/kbcli_cluster_create_mongodb.mdx index 96b9b695..48f226bb 100644 --- a/docs/en/preview/cli/kbcli_cluster_create_mongodb.mdx +++ b/docs/en/preview/cli/kbcli_cluster_create_mongodb.mdx @@ -28,18 +28,19 @@ kbcli cluster create mongodb NAME [flags] -h, --help help for mongodb --hostnetwork string Legal values [enabled, disabled]. (default "enabled") --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) - --mode string Cluster topology mode. Legal values [standalone, replicaset]. (default "standalone") + --mode string Cluster topology mode. Legal values [standalone, replicaset, sharding]. (default "standalone") --node-labels stringToString Node label selector (default []) -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") --replicas int The number of replicas, for standalone mode, the replicas is 1, for replicaset mode, the default replicas is 3. Value range [1, 5]. (default 1) + --shards int The number of shards, for sharding mode, the default shards is 3. Value range [1, 128]. (default 3) --storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) --storage-class-name string Storage class name of the data volume --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. 
(default "Delete") --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' --topology-keys stringArray Topology keys for affinity - --version string Cluster version. Legal values [8.0.8, 8.0.6, 8.0.4, 7.0.19, 7.0.16, 7.0.12, 6.0.22, 6.0.20, 6.0.16, 5.0.30, 5.0.28, 4.4.29, 4.2.24, 4.0.28]. (default "6.0.16") + --version string Cluster version. Legal values [8.0.8, 7.0.18, 6.0.21, 5.0.29, 4.4.29, 4.2.25, 4.0.28]. (default "6.0.21") ``` ### Options inherited from parent commands diff --git a/docs/en/preview/cli/kbcli_cluster_create_mysql.mdx b/docs/en/preview/cli/kbcli_cluster_create_mysql.mdx index 9cbe561e..d58a7fcf 100644 --- a/docs/en/preview/cli/kbcli_cluster_create_mysql.mdx +++ b/docs/en/preview/cli/kbcli_cluster_create_mysql.mdx @@ -33,9 +33,9 @@ kbcli cluster create mysql NAME [flags] --orchestrator.service-reference.endpoint string Endpoint name of the service reference, format: : -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") - --proxysql.cpu float (default 1) - --proxysql.memory float Memory, the unit is Gi. (default 1) - --proxysql.replicas int (default 1) + --proxysql.cpu float Proxysql CPU cores. (default 1) + --proxysql.memory float Proxysql Memory, the unit is Gi. (default 1) + --proxysql.replicas int The number of replicas for Proxysql. (default 1) --replicas int The number of replicas. Value range [1, 5]. (default 1) --storage float Storage size, the unit is Gi. Value range [1, 10000]. 
(default 20) --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") diff --git a/docs/en/preview/cli/kbcli_cluster_custom-ops.mdx b/docs/en/preview/cli/kbcli_cluster_custom-ops.mdx index 8bd6e19e..31399156 100644 --- a/docs/en/preview/cli/kbcli_cluster_custom-ops.mdx +++ b/docs/en/preview/cli/kbcli_cluster_custom-ops.mdx @@ -57,6 +57,15 @@ kbcli cluster custom-ops OpsDef --cluster [fl ### SEE ALSO * [kbcli cluster](kbcli_cluster.md) - Cluster command. +* [kbcli cluster custom-ops kafka-quota](kbcli_cluster_custom-ops_kafka-quota.md) - Create a custom ops with opsDef kafka-quota +* [kbcli cluster custom-ops kafka-topic](kbcli_cluster_custom-ops_kafka-topic.md) - Create a custom ops with opsDef kafka-topic +* [kbcli cluster custom-ops kafka-user-acl](kbcli_cluster_custom-ops_kafka-user-acl.md) - Create a custom ops with opsDef kafka-user-acl +* [kbcli cluster custom-ops pg-update-standby-config](kbcli_cluster_custom-ops_pg-update-standby-config.md) - Create a custom ops with opsDef pg-update-standby-config +* [kbcli cluster custom-ops redis-cluster-rebalance](kbcli_cluster_custom-ops_redis-cluster-rebalance.md) - Create a custom ops with opsDef redis-cluster-rebalance +* [kbcli cluster custom-ops redis-master-account-ops](kbcli_cluster_custom-ops_redis-master-account-ops.md) - Create a custom ops with opsDef redis-master-account-ops +* [kbcli cluster custom-ops redis-reset-master](kbcli_cluster_custom-ops_redis-reset-master.md) - Create a custom ops with opsDef redis-reset-master +* [kbcli cluster custom-ops redis-sentinel-account-ops](kbcli_cluster_custom-ops_redis-sentinel-account-ops.md) - Create a custom ops with opsDef redis-sentinel-account-ops +* [kbcli cluster custom-ops redis-shard-account-ops](kbcli_cluster_custom-ops_redis-shard-account-ops.md) - Create a custom ops with opsDef redis-shard-account-ops #### Go Back to [CLI Overview](cli.md) Homepage. 
diff --git a/docs/en/preview/cli/kbcli_cluster_custom-ops_kafka-quota.mdx b/docs/en/preview/cli/kbcli_cluster_custom-ops_kafka-quota.mdx new file mode 100644 index 00000000..12cdc739 --- /dev/null +++ b/docs/en/preview/cli/kbcli_cluster_custom-ops_kafka-quota.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli cluster custom-ops kafka-quota +--- + +Create a custom ops with opsDef kafka-quota + +``` +kbcli cluster custom-ops kafka-quota [flags] +``` + +### Examples + +``` + # Create a kafka-quota ops + kbcli cluster custom-ops kafka-quota --component +``` + +### Options + +``` + --auto-approve Skip interactive approval before promote the instance + --client string client id. + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --consumer-byte-rate int the maximum number of messages that can be consumed per second, measured in bytes/sec + --controller-mutation-rate int partition mutation quota to control the rate at which mutations are accepted for user requests. + --delete-quotas stringArray + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for kafka-quota + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --producer-byte-rate int the maximum number of messages that can be produced per second, measured in bytes/sec + --request-percentage int request percentage. 
+ --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed + --user string user name +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/preview/cli/kbcli_cluster_custom-ops_kafka-topic.mdx b/docs/en/preview/cli/kbcli_cluster_custom-ops_kafka-topic.mdx new file mode 100644 index 00000000..7e1a8c10 --- /dev/null +++ b/docs/en/preview/cli/kbcli_cluster_custom-ops_kafka-topic.mdx @@ -0,0 +1,74 @@ +--- +title: kbcli cluster custom-ops kafka-topic +--- + +Create a custom ops with opsDef kafka-topic + +``` +kbcli cluster custom-ops kafka-topic [flags] +``` + +### Examples + +``` + # Create a kafka-topic ops + kbcli cluster custom-ops kafka-topic --component --topic= --type= +``` + +### Options + +``` + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --config string A topic configuration override for the topic being created or altered. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for kafka-topic + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --partitions int The number of partitions for the topic being created or altered (WARNING: + If partitions are increased for a topic that has a key, the partition logic or ordering + of the messages will be affected). If not supplied for create, defaults to the cluster default. + Value range [1, 10000]. + --replicas int The replication factor for each partition in the topic being + created. If not supplied, defaults to the cluster default. + Value range [1, 10]. 
+ --topic string The topic to create, alter or delete. It also accepts a regular + expression, except for --create option. Put topic name in double quotes and + use the '\' prefix to escape regular expression symbols; e.g. "test\.topic". + + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed + --type string operation type, supports value: [create, alter, delete]. Legal values [create, alter, delete]. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/preview/cli/kbcli_cluster_custom-ops_kafka-user-acl.mdx b/docs/en/preview/cli/kbcli_cluster_custom-ops_kafka-user-acl.mdx new file mode 100644 index 00000000..cc0074e4 --- /dev/null +++ b/docs/en/preview/cli/kbcli_cluster_custom-ops_kafka-user-acl.mdx @@ -0,0 +1,73 @@ +--- +title: kbcli cluster custom-ops kafka-user-acl +--- + +Create a custom ops with opsDef kafka-user-acl + +``` +kbcli cluster custom-ops kafka-user-acl [flags] +``` + +### Examples + +``` + # Create a kafka-user-acl ops + kbcli cluster custom-ops kafka-user-acl --component --operations= --type= +``` + +### Options + +``` + --allow-hosts stringArray + --allow-users stringArray + --auto-approve Skip interactive approval before promote the instance + --cluster string Indicates to the script that the user is trying to interact with acls on the singular cluster resource. + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --consumer indicate to add or remove the acl of consumer. + --deny-hosts stringArray + --deny-users stringArray + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. 
(default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + --group string consumer-group. + -h, --help help for kafka-user-acl + --name string OpsRequest name. if not specified, it will be randomly generated + --operations stringArray + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pattern-type string Resource pattern type. + --producer indicate to add or remove the acl of producer. + --topic string topic name. + --transactional-id string The transactionalId to which ACLs should be added or removed. A value of * indicates the ACLs should apply to all transactionalIds. + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed + --type string user name +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/preview/cli/kbcli_cluster_custom-ops_pg-update-standby-config.mdx b/docs/en/preview/cli/kbcli_cluster_custom-ops_pg-update-standby-config.mdx new file mode 100644 index 00000000..0088c238 --- /dev/null +++ b/docs/en/preview/cli/kbcli_cluster_custom-ops_pg-update-standby-config.mdx @@ -0,0 +1,64 @@ +--- +title: kbcli cluster custom-ops pg-update-standby-config +--- + +Create a custom ops with opsDef pg-update-standby-config + +``` +kbcli cluster custom-ops pg-update-standby-config [flags] +``` + +### Examples + +``` + # Create a pg-update-standby-config ops + kbcli cluster custom-ops pg-update-standby-config --component +``` + +### Options + +``` + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. 
If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for pg-update-standby-config + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --primary-endpoint string primaryEndpoint defines endpoint of the primary instance.empty string means no primary instance. + The format is {host}:{port} + + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/preview/cli/kbcli_cluster_diff-config.mdx b/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-cluster-rebalance.mdx similarity index 60% rename from docs/en/preview/cli/kbcli_cluster_diff-config.mdx rename to docs/en/preview/cli/kbcli_cluster_custom-ops_redis-cluster-rebalance.mdx index efd0217f..2353be18 100644 --- a/docs/en/preview/cli/kbcli_cluster_diff-config.mdx +++ b/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-cluster-rebalance.mdx @@ -1,24 +1,32 @@ --- -title: kbcli cluster diff-config +title: kbcli cluster custom-ops redis-cluster-rebalance --- -Show the difference in parameters between the two submitted OpsRequest. 
+Create a custom ops with opsDef redis-cluster-rebalance ``` -kbcli cluster diff-config [flags] +kbcli cluster custom-ops redis-cluster-rebalance [flags] ``` ### Examples ``` - # compare config files - kbcli cluster diff-config opsrequest1 opsrequest2 + # Create a redis-cluster-rebalance ops + kbcli cluster custom-ops redis-cluster-rebalance --component ``` ### Options ``` - -h, --help help for diff-config + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for redis-cluster-rebalance + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed ``` ### Options inherited from parent commands @@ -47,7 +55,7 @@ kbcli cluster diff-config [flags] ### SEE ALSO -* [kbcli cluster](kbcli_cluster.md) - Cluster command. +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - #### Go Back to [CLI Overview](cli.md) Homepage. 
diff --git a/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-master-account-ops.mdx b/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-master-account-ops.mdx new file mode 100644 index 00000000..cc2d577b --- /dev/null +++ b/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-master-account-ops.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli cluster custom-ops redis-master-account-ops +--- + +Create a custom ops with opsDef redis-master-account-ops + +``` +kbcli cluster custom-ops redis-master-account-ops [flags] +``` + +### Examples + +``` + # Create a redis-master-account-ops ops + kbcli cluster custom-ops redis-master-account-ops --component +``` + +### Options + +``` + --acl-command string ACL_COMMAND is the complete Redis command for account operations, such as: + 'ACL SETUSER alice-sen on ~* &* +@all #' or 'ACL DELUSER alice-sen', + It is recommended to use hashed passwords (#) instead of plaintext passwords when setting user credentials + + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for redis-master-account-ops + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --replicas int REPLICAS is the number of replicas of the Redis cluster. 
+ + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_convert-to-v1.mdx b/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-reset-master.mdx similarity index 60% rename from docs/en/release-1_0_1/cli/kbcli_cluster_convert-to-v1.mdx rename to docs/en/preview/cli/kbcli_cluster_custom-ops_redis-reset-master.mdx index ef4dbaf0..30c8fbc6 100644 --- a/docs/en/release-1_0_1/cli/kbcli_cluster_convert-to-v1.mdx +++ b/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-reset-master.mdx @@ -1,29 +1,32 @@ --- -title: kbcli cluster convert-to-v1 +title: kbcli cluster custom-ops redis-reset-master --- -convert cluster api version. +Create a custom ops with opsDef redis-reset-master ``` -kbcli cluster convert-to-v1 [NAME] [flags] +kbcli cluster custom-ops redis-reset-master [flags] ``` ### Examples ``` - # convert a v1alpha1 cluster - kbcli cluster convert-to-v1 mycluster - - # convert a v1alpha1 cluster with --dry-run - kbcli cluster convert-to-v1 mycluster --dry-run + # Create a redis-reset-master ops + kbcli cluster custom-ops redis-reset-master --component ``` ### Options ``` - --dry-run dry run mode - -h, --help help for convert-to-v1 - --no-diff only print the new cluster yaml + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. 
(default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for redis-reset-master + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed ``` ### Options inherited from parent commands @@ -52,7 +55,7 @@ kbcli cluster convert-to-v1 [NAME] [flags] ### SEE ALSO -* [kbcli cluster](kbcli_cluster.md) - Cluster command. +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - #### Go Back to [CLI Overview](cli.md) Homepage. diff --git a/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-sentinel-account-ops.mdx b/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-sentinel-account-ops.mdx new file mode 100644 index 00000000..50046437 --- /dev/null +++ b/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-sentinel-account-ops.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli cluster custom-ops redis-sentinel-account-ops +--- + +Create a custom ops with opsDef redis-sentinel-account-ops + +``` +kbcli cluster custom-ops redis-sentinel-account-ops [flags] +``` + +### Examples + +``` + # Create a redis-sentinel-account-ops ops + kbcli cluster custom-ops redis-sentinel-account-ops --component +``` + +### Options + +``` + --acl-command string ACL_COMMAND is the complete Redis command for account operations, such as: + 'ACL SETUSER alice-sen on ~* &* +@all #' or 'ACL DELUSER alice-sen', + It is recommended to use hashed passwords (#) instead of plaintext passwords when setting user credentials + + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". 
If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for redis-sentinel-account-ops + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --replicas int REPLICAS is the number of replicas of the Redis cluster. + + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-shard-account-ops.mdx b/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-shard-account-ops.mdx new file mode 100644 index 00000000..a7cd51e2 --- /dev/null +++ b/docs/en/preview/cli/kbcli_cluster_custom-ops_redis-shard-account-ops.mdx @@ -0,0 +1,69 @@ +--- +title: kbcli cluster custom-ops redis-shard-account-ops +--- + +Create a custom ops with opsDef redis-shard-account-ops + +``` +kbcli cluster custom-ops redis-shard-account-ops [flags] +``` + +### Examples + +``` + # Create a redis-shard-account-ops ops + kbcli cluster custom-ops redis-shard-account-ops --component +``` + +### Options + +``` + --acl-command string ACL_COMMAND is the complete Redis command for account operations, such as: + 'ACL SETUSER alice-sen on ~* &* +@all #' or 'ACL DELUSER alice-sen', + It is recommended to use hashed passwords (#) instead of plaintext passwords when setting user credentials + + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. 
if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for redis-shard-account-ops + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --replicas int REPLICAS is the number of replicas of the Redis cluster. + + --shard-mode string SHARD_MODE indicates the mode of Redis cluster. When the cluster type is cluster, it must be set to "TRUE". + + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/preview/cli/kbcli_playground_init.mdx b/docs/en/preview/cli/kbcli_playground_init.mdx index 9e2924b2..e2f7e3ea 100644 --- a/docs/en/preview/cli/kbcli_playground_init.mdx +++ b/docs/en/preview/cli/kbcli_playground_init.mdx @@ -34,9 +34,6 @@ kbcli playground init [flags] kbcli exec -it mycluster-mysql-0 bash mysql -h 127.1 -u root -p$MYSQL_ROOT_PASSWORD - # view the Grafana - kbcli dashboard open kubeblocks-grafana - # destroy playground kbcli playground destroy ``` diff --git a/docs/en/release-1_0_1/cli/_category_.yml b/docs/en/release-1_0_1/cli/_category_.yml deleted file mode 100644 index dff53aca..00000000 --- a/docs/en/release-1_0_1/cli/_category_.yml +++ /dev/null @@ -1,5 +0,0 @@ -position: 30 -label: Command Line -collapsible: true -collapsed: true -className: hide-children \ No newline at end of file diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_etcd.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_etcd.mdx index 4ea3d7e6..22729a58 100644 --- 
a/docs/en/release-1_0_1/cli/kbcli_cluster_create_etcd.mdx +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_etcd.mdx @@ -35,11 +35,13 @@ kbcli cluster create etcd NAME [flags] --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") --replicas int The number of replicas, the default replicas is 3. Value range [1, 5]. (default 3) --storage float Data Storage size, the unit is Gi. Value range [1, 10000]. (default 10) + --storage-class-name string The name of the StorageClass to use for data storage. --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") --tls-enable Enable TLS for etcd cluster --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' --topology-keys stringArray Topology keys for affinity + --version string etcd Service Version. (default "3.6.1") ``` ### Options inherited from parent commands diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_kafka.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_kafka.mdx index 49980c8e..b3e3ebef 100644 --- a/docs/en/release-1_0_1/cli/kbcli_cluster_create_kafka.mdx +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_kafka.mdx @@ -34,11 +34,11 @@ kbcli cluster create kafka NAME [flags] --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) --meta-storage float Metadata Storage size, the unit is Gi. Value range [1, 10000]. (default 5) --meta-storage-class string The StorageClass for Kafka Metadata Storage. - --mode string Mode for Kafka kraft cluster, 'combined' is combined Kafka controller and broker,'separated' is broker and controller running independently. Legal values [combined, separated]. 
(default "combined") + --mode string Mode for Kafka kraft cluster, 'combined' is combined Kafka controller and broker,'separated' is broker and controller running independently. Legal values [combined, separated, withZookeeper-10]. (default "combined") --monitor-enable Enable monitor for Kafka. (default true) --monitor.limit.cpu float (default 0.5) --monitor.limit.memory float (default 1) - --monitor.replicas int Number of replicas for the monitor component. Value range [1]. (default 1) + --monitor.replicas float Number of replicas for the monitor component. Value range [1]. (default 1) --monitor.request.cpu float (default 0.1) --monitor.request.memory float (default 0.2) --node-labels stringToString Node label selector (default []) @@ -47,6 +47,7 @@ kbcli cluster create kafka NAME [flags] --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") --replicas int The number of Kafka broker replicas for combined mode. Legal values [1, 3, 5]. (default 1) --sasl-enable Enable authentication using SASL/PLAIN for Kafka. + --sasl-scram-enable Enable authentication using SASL/SCRAM for Kafka. --storage float Data Storage size, the unit is Gi. Value range [1, 10000]. (default 10) --storage-class string The StorageClass for Kafka Data Storage. --storage-enable Enable storage for Kafka. diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_mongodb.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_mongodb.mdx index 96b9b695..48f226bb 100644 --- a/docs/en/release-1_0_1/cli/kbcli_cluster_create_mongodb.mdx +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_mongodb.mdx @@ -28,18 +28,19 @@ kbcli cluster create mongodb NAME [flags] -h, --help help for mongodb --hostnetwork string Legal values [enabled, disabled]. (default "enabled") --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) - --mode string Cluster topology mode. Legal values [standalone, replicaset]. 
(default "standalone") + --mode string Cluster topology mode. Legal values [standalone, replicaset, sharding]. (default "standalone") --node-labels stringToString Node label selector (default []) -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") --replicas int The number of replicas, for standalone mode, the replicas is 1, for replicaset mode, the default replicas is 3. Value range [1, 5]. (default 1) + --shards int The number of shards, for sharding mode, the default shards is 3. Value range [1, 128]. (default 3) --storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) --storage-class-name string Storage class name of the data volume --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' --topology-keys stringArray Topology keys for affinity - --version string Cluster version. Legal values [8.0.8, 8.0.6, 8.0.4, 7.0.19, 7.0.16, 7.0.12, 6.0.22, 6.0.20, 6.0.16, 5.0.30, 5.0.28, 4.4.29, 4.2.24, 4.0.28]. (default "6.0.16") + --version string Cluster version. Legal values [8.0.8, 7.0.18, 6.0.21, 5.0.29, 4.4.29, 4.2.25, 4.0.28]. 
(default "6.0.21") ``` ### Options inherited from parent commands diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_create_mysql.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_create_mysql.mdx index 9cbe561e..d58a7fcf 100644 --- a/docs/en/release-1_0_1/cli/kbcli_cluster_create_mysql.mdx +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_create_mysql.mdx @@ -33,9 +33,9 @@ kbcli cluster create mysql NAME [flags] --orchestrator.service-reference.endpoint string Endpoint name of the service reference, format: : -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") - --proxysql.cpu float (default 1) - --proxysql.memory float Memory, the unit is Gi. (default 1) - --proxysql.replicas int (default 1) + --proxysql.cpu float Proxysql CPU cores. (default 1) + --proxysql.memory float Proxysql Memory, the unit is Gi. (default 1) + --proxysql.replicas int The number of replicas for Proxysql. (default 1) --replicas int The number of replicas. Value range [1, 5]. (default 1) --storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops.mdx index 8bd6e19e..31399156 100644 --- a/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops.mdx +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops.mdx @@ -57,6 +57,15 @@ kbcli cluster custom-ops OpsDef --cluster [fl ### SEE ALSO * [kbcli cluster](kbcli_cluster.md) - Cluster command. 
+* [kbcli cluster custom-ops kafka-quota](kbcli_cluster_custom-ops_kafka-quota.md) - Create a custom ops with opsDef kafka-quota +* [kbcli cluster custom-ops kafka-topic](kbcli_cluster_custom-ops_kafka-topic.md) - Create a custom ops with opsDef kafka-topic +* [kbcli cluster custom-ops kafka-user-acl](kbcli_cluster_custom-ops_kafka-user-acl.md) - Create a custom ops with opsDef kafka-user-acl +* [kbcli cluster custom-ops pg-update-standby-config](kbcli_cluster_custom-ops_pg-update-standby-config.md) - Create a custom ops with opsDef pg-update-standby-config +* [kbcli cluster custom-ops redis-cluster-rebalance](kbcli_cluster_custom-ops_redis-cluster-rebalance.md) - Create a custom ops with opsDef redis-cluster-rebalance +* [kbcli cluster custom-ops redis-master-account-ops](kbcli_cluster_custom-ops_redis-master-account-ops.md) - Create a custom ops with opsDef redis-master-account-ops +* [kbcli cluster custom-ops redis-reset-master](kbcli_cluster_custom-ops_redis-reset-master.md) - Create a custom ops with opsDef redis-reset-master +* [kbcli cluster custom-ops redis-sentinel-account-ops](kbcli_cluster_custom-ops_redis-sentinel-account-ops.md) - Create a custom ops with opsDef redis-sentinel-account-ops +* [kbcli cluster custom-ops redis-shard-account-ops](kbcli_cluster_custom-ops_redis-shard-account-ops.md) - Create a custom ops with opsDef redis-shard-account-ops #### Go Back to [CLI Overview](cli.md) Homepage. 
diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_kafka-quota.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_kafka-quota.mdx new file mode 100644 index 00000000..12cdc739 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_kafka-quota.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli cluster custom-ops kafka-quota +--- + +Create a custom ops with opsDef kafka-quota + +``` +kbcli cluster custom-ops kafka-quota [flags] +``` + +### Examples + +``` + # Create a kafka-quota ops + kbcli cluster custom-ops kafka-quota --component +``` + +### Options + +``` + --auto-approve Skip interactive approval before promote the instance + --client string client id. + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --consumer-byte-rate int the maximum number of messages that can be consumed per second, measured in bytes/sec + --controller-mutation-rate int partition mutation quota to control the rate at which mutations are accepted for user requests. + --delete-quotas stringArray + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for kafka-quota + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --producer-byte-rate int the maximum number of messages that can be produced per second, measured in bytes/sec + --request-percentage int request percentage. 
+ --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed + --user string user name +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. 
+ diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_kafka-topic.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_kafka-topic.mdx new file mode 100644 index 00000000..7e1a8c10 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_kafka-topic.mdx @@ -0,0 +1,74 @@ +--- +title: kbcli cluster custom-ops kafka-topic +--- + +Create a custom ops with opsDef kafka-topic + +``` +kbcli cluster custom-ops kafka-topic [flags] +``` + +### Examples + +``` + # Create a kafka-topic ops + kbcli cluster custom-ops kafka-topic --component --topic= --type= +``` + +### Options + +``` + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --config string A topic configuration override for the topic being created or altered. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for kafka-topic + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --partitions int The number of partitions for the topic being created or altered (WARNING: + If partitions are increased for a topic that has a key, the partition logic or ordering + of the messages will be affected). If not supplied for create, defaults to the cluster default. + Value range [1, 10000]. + --replicas int The replication factor for each partition in the topic being + created. If not supplied, defaults to the cluster default. 
+ Value range [1, 10]. + --topic string The topic to create, alter or delete. It also accepts a regular + expression, except for --create option. Put topic name in double quotes and + use the '\' prefix to escape regular expression symbols; e.g. "test\.topic". + + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed + --type string operation type, supports value: [create, alter, delete]. Legal values [create, alter, delete]. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_kafka-user-acl.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_kafka-user-acl.mdx new file mode 100644 index 00000000..cc0074e4 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_kafka-user-acl.mdx @@ -0,0 +1,73 @@ +--- +title: kbcli cluster custom-ops kafka-user-acl +--- + +Create a custom ops with opsDef kafka-user-acl + +``` +kbcli cluster custom-ops kafka-user-acl [flags] +``` + +### Examples + +``` + # Create a kafka-user-acl ops + kbcli cluster custom-ops kafka-user-acl --component --operations= --type= +``` + +### Options + +``` + --allow-hosts stringArray + --allow-users stringArray + --auto-approve Skip interactive approval before promote the instance + --cluster string Indicates to the script that the user is trying to interact with acls on the singular cluster resource. + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --consumer indicate to add or remove the acl of consumer. + --deny-hosts stringArray + --deny-users stringArray + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. 
(default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + --group string consumer-group. + -h, --help help for kafka-user-acl + --name string OpsRequest name. if not specified, it will be randomly generated + --operations stringArray + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pattern-type string Resource pattern type. + --producer indicate to add or remove the acl of producer. + --topic string topic name. + --transactional-id string The transactionalId to which ACLs should be added or removed. A value of * indicates the ACLs should apply to all transactionalIds. + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed + --type string user name +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_pg-update-standby-config.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_pg-update-standby-config.mdx new file mode 100644 index 00000000..0088c238 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_pg-update-standby-config.mdx @@ -0,0 +1,64 @@ +--- +title: kbcli cluster custom-ops pg-update-standby-config +--- + +Create a custom ops with opsDef pg-update-standby-config + +``` +kbcli cluster custom-ops pg-update-standby-config [flags] +``` + +### Examples + +``` + # Create a pg-update-standby-config ops + kbcli cluster custom-ops pg-update-standby-config --component +``` + +### Options + +``` + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. 
If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for pg-update-standby-config + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --primary-endpoint string primaryEndpoint defines endpoint of the primary instance.empty string means no primary instance. + The format is {host}:{port} + + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_diff-config.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-cluster-rebalance.mdx similarity index 60% rename from docs/en/release-1_0_1/cli/kbcli_cluster_diff-config.mdx rename to docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-cluster-rebalance.mdx index efd0217f..2353be18 100644 --- a/docs/en/release-1_0_1/cli/kbcli_cluster_diff-config.mdx +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-cluster-rebalance.mdx @@ -1,24 +1,32 @@ --- -title: kbcli cluster diff-config +title: kbcli cluster custom-ops redis-cluster-rebalance --- -Show the difference in parameters between the two submitted OpsRequest. 
+Create a custom ops with opsDef redis-cluster-rebalance ``` -kbcli cluster diff-config [flags] +kbcli cluster custom-ops redis-cluster-rebalance [flags] ``` ### Examples ``` - # compare config files - kbcli cluster diff-config opsrequest1 opsrequest2 + # Create a redis-cluster-rebalance ops + kbcli cluster custom-ops redis-cluster-rebalance --component ``` ### Options ``` - -h, --help help for diff-config + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for redis-cluster-rebalance + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed ``` ### Options inherited from parent commands @@ -47,7 +55,7 @@ kbcli cluster diff-config [flags] ### SEE ALSO -* [kbcli cluster](kbcli_cluster.md) - Cluster command. +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - #### Go Back to [CLI Overview](cli.md) Homepage. 
diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-master-account-ops.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-master-account-ops.mdx new file mode 100644 index 00000000..cc2d577b --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-master-account-ops.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli cluster custom-ops redis-master-account-ops +--- + +Create a custom ops with opsDef redis-master-account-ops + +``` +kbcli cluster custom-ops redis-master-account-ops [flags] +``` + +### Examples + +``` + # Create a redis-master-account-ops ops + kbcli cluster custom-ops redis-master-account-ops --component +``` + +### Options + +``` + --acl-command string ACL_COMMAND is the complete Redis command for account operations, such as: + 'ACL SETUSER alice-sen on ~* &* +@all #' or 'ACL DELUSER alice-sen', + It is recommended to use hashed passwords (#) instead of plaintext passwords when setting user credentials + + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for redis-master-account-ops + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --replicas int REPLICAS is the number of replicas of the Redis cluster. 
+ + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/preview/cli/kbcli_cluster_convert-to-v1.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-reset-master.mdx similarity index 60% rename from docs/en/preview/cli/kbcli_cluster_convert-to-v1.mdx rename to docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-reset-master.mdx index ef4dbaf0..30c8fbc6 100644 --- a/docs/en/preview/cli/kbcli_cluster_convert-to-v1.mdx +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-reset-master.mdx @@ -1,29 +1,32 @@ --- -title: kbcli cluster convert-to-v1 +title: kbcli cluster custom-ops redis-reset-master --- -convert cluster api version. +Create a custom ops with opsDef redis-reset-master ``` -kbcli cluster convert-to-v1 [NAME] [flags] +kbcli cluster custom-ops redis-reset-master [flags] ``` ### Examples ``` - # convert a v1alpha1 cluster - kbcli cluster convert-to-v1 mycluster - - # convert a v1alpha1 cluster with --dry-run - kbcli cluster convert-to-v1 mycluster --dry-run + # Create a redis-reset-master ops + kbcli cluster custom-ops redis-reset-master --component ``` ### Options ``` - --dry-run dry run mode - -h, --help help for convert-to-v1 - --no-diff only print the new cluster yaml + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. 
(default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for redis-reset-master + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed ``` ### Options inherited from parent commands @@ -52,7 +55,7 @@ kbcli cluster convert-to-v1 [NAME] [flags] ### SEE ALSO -* [kbcli cluster](kbcli_cluster.md) - Cluster command. +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - #### Go Back to [CLI Overview](cli.md) Homepage. diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-sentinel-account-ops.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-sentinel-account-ops.mdx new file mode 100644 index 00000000..50046437 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-sentinel-account-ops.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli cluster custom-ops redis-sentinel-account-ops +--- + +Create a custom ops with opsDef redis-sentinel-account-ops + +``` +kbcli cluster custom-ops redis-sentinel-account-ops [flags] +``` + +### Examples + +``` + # Create a redis-sentinel-account-ops ops + kbcli cluster custom-ops redis-sentinel-account-ops --component +``` + +### Options + +``` + --acl-command string ACL_COMMAND is the complete Redis command for account operations, such as: + 'ACL SETUSER alice-sen on ~* &* +@all #' or 'ACL DELUSER alice-sen', + It is recommended to use hashed passwords (#) instead of plaintext passwords when setting user credentials + + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. 
+ --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for redis-sentinel-account-ops + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --replicas int REPLICAS is the number of replicas of the Redis cluster. + + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-shard-account-ops.mdx b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-shard-account-ops.mdx new file mode 100644 index 00000000..a7cd51e2 --- /dev/null +++ b/docs/en/release-1_0_1/cli/kbcli_cluster_custom-ops_redis-shard-account-ops.mdx @@ -0,0 +1,69 @@ +--- +title: kbcli cluster custom-ops redis-shard-account-ops +--- + +Create a custom ops with opsDef redis-shard-account-ops + +``` +kbcli cluster custom-ops redis-shard-account-ops [flags] +``` + +### Examples + +``` + # Create a redis-shard-account-ops ops + kbcli cluster custom-ops redis-shard-account-ops --component +``` + +### Options + +``` + --acl-command string ACL_COMMAND is the complete Redis command for account operations, such as: + 'ACL SETUSER alice-sen on ~* &* +@all #' or 'ACL DELUSER alice-sen', + It is recommended to use hashed passwords (#) instead of plaintext passwords when setting user credentials + + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. 
if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for redis-shard-account-ops + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --replicas int REPLICAS is the number of replicas of the Redis cluster. + + --shard-mode string SHARD_MODE indicates the mode of Redis cluster. When the cluster type is cluster, it must be set to "TRUE". + + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/en/release-1_0_1/cli/kbcli_playground_init.mdx b/docs/en/release-1_0_1/cli/kbcli_playground_init.mdx index 9e2924b2..e2f7e3ea 100644 --- a/docs/en/release-1_0_1/cli/kbcli_playground_init.mdx +++ b/docs/en/release-1_0_1/cli/kbcli_playground_init.mdx @@ -34,9 +34,6 @@ kbcli playground init [flags] kbcli exec -it mycluster-mysql-0 bash mysql -h 127.1 -u root -p$MYSQL_ROOT_PASSWORD - # view the Grafana - kbcli dashboard open kubeblocks-grafana - # destroy playground kbcli playground destroy ``` diff --git a/scripts/sync-kbcli-docs.sh b/scripts/sync-kbcli-docs.sh index 4dc044cd..7b56d238 100755 --- a/scripts/sync-kbcli-docs.sh +++ b/scripts/sync-kbcli-docs.sh @@ -13,7 +13,7 @@ rm -rf docs/en/preview/cli # check out kbcli repo of branch v1.0.0 and copy docs/user_docs/cli to docs/zh/cli and docs/en/cli git clone -b "$BRANCH" "$REPO_URL" "$CLONE_DIR" -cd "$CLONE_DIR" && make kbcli-doc +cd "$CLONE_DIR" && rm -rf $KBCLI_DOCS_DIR && make kbcli-doc cd .. 
# cp -r "$CLONE_DIR/$KBCLI_DOCS_DIR" docs/zh/preview/cli cp -r "$CLONE_DIR/$KBCLI_DOCS_DIR" docs/en/preview/cli From 3f8a0e419536279bedf85a9cd64c5c76e38aebe5 Mon Sep 17 00:00:00 2001 From: Shanshan Date: Tue, 16 Sep 2025 16:22:21 +0800 Subject: [PATCH 4/6] update v101 release notes --- .../user_docs/release_notes/release-10/101.mdx | 10 +++++----- .../user_docs/release_notes/release-10/101.mdx | 10 +++++----- .../user_docs/release_notes/release-10/101.mdx | 12 ++++++------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/en/preview/user_docs/release_notes/release-10/101.mdx b/docs/en/preview/user_docs/release_notes/release-10/101.mdx index fd306953..384d7d09 100644 --- a/docs/en/preview/user_docs/release_notes/release-10/101.mdx +++ b/docs/en/preview/user_docs/release_notes/release-10/101.mdx @@ -12,12 +12,12 @@ We are delighted to announce the release of KubeBlocks v1.0.1. This release incl ### Features -- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. [#9587](https://github.com/apecloud/kubeblocks/pull/9587) -- **Support user-defined environment variables in lifecycle actions** - Allows customization of environment variables for lifecycle actions. [#9514](https://github.com/apecloud/kubeblocks/pull/9514) -- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. [#9605](https://github.com/apecloud/kubeblocks/pull/9605) +- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. ([#9587](https://github.com/apecloud/kubeblocks/pull/9587)) +- **Support user-defined environment variables in lifecycle actions** - Allows customization of environment variables for lifecycle actions. 
([#9514](https://github.com/apecloud/kubeblocks/pull/9514)) +- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. ([#9605](https://github.com/apecloud/kubeblocks/pull/9605)) - **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. [#9480](https://github.com/apecloud/kubeblocks/pull/9480) -- **Support HTTP and gRPC actions** - Supports HTTP and gRPC in lifecycle actions. [#9656](https://github.com/apecloud/kubeblocks/pull/9656) -- **Scale out replicas from backup** - Supports to scale out replicas from backup in OpsRequest. [#9623](https://github.com/apecloud/kubeblocks/pull/9623) +- **Support HTTP and gRPC actions** - Supports HTTP and gRPC in lifecycle actions. ([#9656](https://github.com/apecloud/kubeblocks/pull/9656)) +- **Scale out replicas from backup** - Supports to scale out replicas from backup in OpsRequest. ([#9623](https://github.com/apecloud/kubeblocks/pull/9623)) - **Improves resource resizing handling** - Support resize subresource in InPlacePodVerticalScaling when supported. ([#9532](https://github.com/apecloud/kubeblocks/pull/9532),[#9545](https://github.com/apecloud/kubeblocks/pull/9545)) - **Add azureblob storage provider** - Adds Azure Blob storage provider support.([#9492](https://github.com/apecloud/kubeblocks/pull/9492)) diff --git a/docs/en/release-1_0/user_docs/release_notes/release-10/101.mdx b/docs/en/release-1_0/user_docs/release_notes/release-10/101.mdx index fd306953..384d7d09 100644 --- a/docs/en/release-1_0/user_docs/release_notes/release-10/101.mdx +++ b/docs/en/release-1_0/user_docs/release_notes/release-10/101.mdx @@ -12,12 +12,12 @@ We are delighted to announce the release of KubeBlocks v1.0.1. This release incl ### Features -- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. 
[#9587](https://github.com/apecloud/kubeblocks/pull/9587) -- **Support user-defined environment variables in lifecycle actions** - Allows customization of environment variables for lifecycle actions. [#9514](https://github.com/apecloud/kubeblocks/pull/9514) -- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. [#9605](https://github.com/apecloud/kubeblocks/pull/9605) +- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. ([#9587](https://github.com/apecloud/kubeblocks/pull/9587)) +- **Support user-defined environment variables in lifecycle actions** - Allows customization of environment variables for lifecycle actions. ([#9514](https://github.com/apecloud/kubeblocks/pull/9514)) +- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. ([#9605](https://github.com/apecloud/kubeblocks/pull/9605)) - **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. [#9480](https://github.com/apecloud/kubeblocks/pull/9480) -- **Support HTTP and gRPC actions** - Supports HTTP and gRPC in lifecycle actions. [#9656](https://github.com/apecloud/kubeblocks/pull/9656) -- **Scale out replicas from backup** - Supports to scale out replicas from backup in OpsRequest. [#9623](https://github.com/apecloud/kubeblocks/pull/9623) +- **Support HTTP and gRPC actions** - Supports HTTP and gRPC in lifecycle actions. ([#9656](https://github.com/apecloud/kubeblocks/pull/9656)) +- **Scale out replicas from backup** - Supports to scale out replicas from backup in OpsRequest. ([#9623](https://github.com/apecloud/kubeblocks/pull/9623)) - **Improves resource resizing handling** - Support resize subresource in InPlacePodVerticalScaling when supported. 
([#9532](https://github.com/apecloud/kubeblocks/pull/9532),[#9545](https://github.com/apecloud/kubeblocks/pull/9545)) - **Add azureblob storage provider** - Adds Azure Blob storage provider support.([#9492](https://github.com/apecloud/kubeblocks/pull/9492)) diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-10/101.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-10/101.mdx index aa8ccc20..384d7d09 100644 --- a/docs/en/release-1_0_1/user_docs/release_notes/release-10/101.mdx +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-10/101.mdx @@ -12,12 +12,12 @@ We are delighted to announce the release of KubeBlocks v1.0.1. This release incl ### Features -- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. [#9587](https://github.com/apecloud/kubeblocks/pull/9587) -- **Support user-defined environment variables in lifecycle actions** - Allows customization of environment variables for lifecycle actions. [#9514](https://github.com/apecloud/kubeblocks/pull/9514) -- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. [#9605](https://github.com/apecloud/kubeblocks/pull/9605) +- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. ([#9587](https://github.com/apecloud/kubeblocks/pull/9587)) +- **Support user-defined environment variables in lifecycle actions** - Allows customization of environment variables for lifecycle actions. ([#9514](https://github.com/apecloud/kubeblocks/pull/9514)) +- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. ([#9605](https://github.com/apecloud/kubeblocks/pull/9605)) - **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. 
[#9480](https://github.com/apecloud/kubeblocks/pull/9480) -- **Support HTTP and gRPC actions** - Supports HTTP and gRPC in lifecycle actions. [#9656](https://github.com/apecloud/kubeblocks/pull/9656) -- **Scale out replicas from backup** - Supports to scale out replicas from backup in OpsRequest. [#9623](https://github.com/apecloud/kubeblocks/pull/9623) +- **Support HTTP and gRPC actions** - Supports HTTP and gRPC in lifecycle actions. ([#9656](https://github.com/apecloud/kubeblocks/pull/9656)) +- **Scale out replicas from backup** - Supports to scale out replicas from backup in OpsRequest. ([#9623](https://github.com/apecloud/kubeblocks/pull/9623)) - **Improves resource resizing handling** - Support resize subresource in InPlacePodVerticalScaling when supported. ([#9532](https://github.com/apecloud/kubeblocks/pull/9532),[#9545](https://github.com/apecloud/kubeblocks/pull/9545)) - **Add azureblob storage provider** - Adds Azure Blob storage provider support.([#9492](https://github.com/apecloud/kubeblocks/pull/9492)) @@ -32,7 +32,7 @@ We are delighted to announce the release of KubeBlocks v1.0.1. 
This release incl ### Redis - Support new Redis versions 8.2, 8.0.3, 7.2.10 and 7.4.5 ([#1968](https://github.com/apecloud/kubeblocks-addons/pull/1968), [#1785](https://github.com/apecloud/kubeblocks-addons/pull/1785), [#1710](https://github.com/apecloud/kubeblocks-addons/pull/1710)) - Fixed Redis memory leak when loading redisgears module ([#1855](https://github.com/apecloud/kubeblocks-addons/pull/1855)) -- Support reconfiguration on Redis cluster for client-output-buffer-limit ([#1973](https://github.com/apecloud/kubeblocks-addons/pull/1973)) +- Support reconfiguration of `client-output-buffer-limit` ([#1973](https://github.com/apecloud/kubeblocks-addons/pull/1973)) ### Clickhouse - Support Horizontal Scale for keeper ([#1713](https://github.com/apecloud/kubeblocks-addons/pull/1713)) From 836a4b03b2187cfee4c4fc323462475db1287f88 Mon Sep 17 00:00:00 2001 From: Shanshan Date: Tue, 16 Sep 2025 16:23:40 +0800 Subject: [PATCH 5/6] chore: remove cli catetory yml --- docs/en/release-1_0/cli/_category_.yml | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 docs/en/release-1_0/cli/_category_.yml diff --git a/docs/en/release-1_0/cli/_category_.yml b/docs/en/release-1_0/cli/_category_.yml deleted file mode 100644 index dff53aca..00000000 --- a/docs/en/release-1_0/cli/_category_.yml +++ /dev/null @@ -1,5 +0,0 @@ -position: 30 -label: Command Line -collapsible: true -collapsed: true -className: hide-children \ No newline at end of file From 0d0cc187178bb7359d79e1940649244141c6f7ff Mon Sep 17 00:00:00 2001 From: Shanshan Date: Tue, 16 Sep 2025 16:28:51 +0800 Subject: [PATCH 6/6] chore: update release notes --- blogs/en/announcing-kubeblocks-v0-9-5.mdx | 42 +++++++++---------- .../release_notes/release-09/095.mdx | 42 +++++++++---------- .../release_notes/release-09/095.mdx | 42 +++++++++---------- .../release_notes/release-09/095.mdx | 42 +++++++++---------- 4 files changed, 84 insertions(+), 84 deletions(-) diff --git a/blogs/en/announcing-kubeblocks-v0-9-5.mdx 
b/blogs/en/announcing-kubeblocks-v0-9-5.mdx index 8f91a3a5..2d56ed4a 100644 --- a/blogs/en/announcing-kubeblocks-v0-9-5.mdx +++ b/blogs/en/announcing-kubeblocks-v0-9-5.mdx @@ -15,49 +15,49 @@ We are delighted to announce the release of KubeBlocks v0.9.5. This release incl ### Features -- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. [#9595](https://github.com/apecloud/kubeblocks/pull/9595) -- **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. [#9489](https://github.com/apecloud/kubeblocks/pull/9489) -- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. [#9607](https://github.com/apecloud/kubeblocks/pull/9607) -- **Support resize subresource in InPlacePodVerticalScaling** - Improves resource resizing handling for K8s v1.32 and above. [#9536](https://github.com/apecloud/kubeblocks/pull/9536) [#9545](https://github.com/apecloud/kubeblocks/pull/9545) -- **Add azureblob storage provider** - Adds Azure Blob storage provider support. [#9492](https://github.com/apecloud/kubeblocks/pull/9492) +- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. ([#9595](https://github.com/apecloud/kubeblocks/pull/9595)) +- **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. ([#9489](https://github.com/apecloud/kubeblocks/pull/9489)) +- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. ([#9607](https://github.com/apecloud/kubeblocks/pull/9607)) +- **Support resize subresource in InPlacePodVerticalScaling** - Improves resource resizing handling for K8s v1.32 and above. 
([#9536](https://github.com/apecloud/kubeblocks/pull/9536), [#9545](https://github.com/apecloud/kubeblocks/pull/9545)) +- **Add azureblob storage provider** - Adds Azure Blob storage provider support. ([#9492](https://github.com/apecloud/kubeblocks/pull/9492)) ## KubeBlocks Addons ### MySQL Variants (MySQL, GreatSQL) -- Support greatsql. [#1793](https://github.com/apecloud/kubeblocks-addons/pull/1793) -- Support mysql audit log config. [#1890](https://github.com/apecloud/kubeblocks-addons/pull/1890) -- Support greatsql auditlog config. [#1893](https://github.com/apecloud/kubeblocks-addons/pull/1893) +- Support greatsql. ([#1793](https://github.com/apecloud/kubeblocks-addons/pull/1793)) +- Support mysql audit log config. ([#1890](https://github.com/apecloud/kubeblocks-addons/pull/1890)) +- Support greatsql auditlog config. ([#1893](https://github.com/apecloud/kubeblocks-addons/pull/1893)) ### PostgreSQL -- Refactor PostgreSQL addon and support PostgreSQL 16 and 17. [#1734](https://github.com/apecloud/kubeblocks-addons/pull/1734) -- Support using etcd as DCS for PostgreSQL. [#1864](https://github.com/apecloud/kubeblocks-addons/pull/1864) +- Refactor PostgreSQL addon and support PostgreSQL 16 and 17. ([#1734](https://github.com/apecloud/kubeblocks-addons/pull/1734)) +- Support using etcd as DCS for PostgreSQL. ([#1864](https://github.com/apecloud/kubeblocks-addons/pull/1864)) ### Redis -- Support redis 7.2.10 and redis 8 [#1812](https://github.com/apecloud/kubeblocks-addons/pull/1812) +- Support redis 7.2.10 and redis 8 ([#1812](https://github.com/apecloud/kubeblocks-addons/pull/1812)) - Fix Redis slave instance memory leak when loading redisgears module. - Change redis maxmemory-policy to volatile-lru and maxmemory = 0.8 * limit_memory. ### MongoDB -- Support mongodb sharding. [#1701](https://github.com/apecloud/kubeblocks-addons/pull/1701) +- Support mongodb sharding. 
([#1701](https://github.com/apecloud/kubeblocks-addons/pull/1701)) ### Elasticsearch -- Optimize backup. [#1853](https://github.com/apecloud/kubeblocks-addons/pull/1853) -- Support new version 8.15.5. [#1929](https://github.com/apecloud/kubeblocks-addons/pull/1929) +- Optimize backup. ([#1853](https://github.com/apecloud/kubeblocks-addons/pull/1853)) +- Support new version 8.15.5. ([#1929](https://github.com/apecloud/kubeblocks-addons/pull/1929)) ### ClickHouse -- Clickhouse support backup and restore. [#1800](https://github.com/apecloud/kubeblocks-addons/pull/1800) -- Update metrics. [#1916](https://github.com/apecloud/kubeblocks-addons/pull/1916) +- Clickhouse support backup and restore. ([#1800](https://github.com/apecloud/kubeblocks-addons/pull/1800)) +- Update metrics. ([#1916](https://github.com/apecloud/kubeblocks-addons/pull/1916)) ### ZooKeeper -- Improve zookeeper jvm setting and gc option. [#1771](https://github.com/apecloud/kubeblocks-addons/pull/1771) [#1938](https://github.com/apecloud/kubeblocks-addons/pull/1938) +- Improve zookeeper jvm setting and gc option. ([#1771](https://github.com/apecloud/kubeblocks-addons/pull/1771), [#1938](https://github.com/apecloud/kubeblocks-addons/pull/1938)) ### etcd -- Support etcd v3.6.1. [#1737](https://github.com/apecloud/kubeblocks-addons/pull/1737) -- Improve etcd backup procedure. [#1740](https://github.com/apecloud/kubeblocks-addons/pull/1740) -- Improve etcd params and configuration. [#1778](https://github.com/apecloud/kubeblocks-addons/pull/1778) +- Support etcd v3.6.1. ([#1737](https://github.com/apecloud/kubeblocks-addons/pull/1737)) +- Improve etcd backup procedure. ([#1740](https://github.com/apecloud/kubeblocks-addons/pull/1740)) +- Improve etcd params and configuration. ([#1778](https://github.com/apecloud/kubeblocks-addons/pull/1778)) ### Milvus -- Support to create Milvus clusters on arm. [#1792](https://github.com/apecloud/kubeblocks-addons/pull/1792) +- Support to create Milvus clusters on arm. 
([#1792](https://github.com/apecloud/kubeblocks-addons/pull/1792)) ## Upgrade to v0.9.5 diff --git a/docs/en/preview/user_docs/release_notes/release-09/095.mdx b/docs/en/preview/user_docs/release_notes/release-09/095.mdx index 02e0f6d7..7a3e717a 100644 --- a/docs/en/preview/user_docs/release_notes/release-09/095.mdx +++ b/docs/en/preview/user_docs/release_notes/release-09/095.mdx @@ -12,49 +12,49 @@ We are delighted to announce the release of KubeBlocks v0.9.5. This release incl ### Features -- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. [#9595](https://github.com/apecloud/kubeblocks/pull/9595) -- **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. [#9489](https://github.com/apecloud/kubeblocks/pull/9489) -- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. [#9607](https://github.com/apecloud/kubeblocks/pull/9607) -- **Support resize subresource in InPlacePodVerticalScaling** - Improves resource resizing handling for K8s v1.32 and above. [#9536](https://github.com/apecloud/kubeblocks/pull/9536) [#9545](https://github.com/apecloud/kubeblocks/pull/9545) -- **Add azureblob storage provider** - Adds Azure Blob storage provider support. [#9492](https://github.com/apecloud/kubeblocks/pull/9492) +- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. ([#9595](https://github.com/apecloud/kubeblocks/pull/9595)) +- **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. ([#9489](https://github.com/apecloud/kubeblocks/pull/9489)) +- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. 
([#9607](https://github.com/apecloud/kubeblocks/pull/9607)) +- **Support resize subresource in InPlacePodVerticalScaling** - Improves resource resizing handling for K8s v1.32 and above. ([#9536](https://github.com/apecloud/kubeblocks/pull/9536), [#9545](https://github.com/apecloud/kubeblocks/pull/9545)) +- **Add azureblob storage provider** - Adds Azure Blob storage provider support. ([#9492](https://github.com/apecloud/kubeblocks/pull/9492)) ## KubeBlocks Addons ### MySQL Variants (MySQL, GreatSQL) -- Support greatsql. [#1793](https://github.com/apecloud/kubeblocks-addons/pull/1793) -- Support mysql audit log config. [#1890](https://github.com/apecloud/kubeblocks-addons/pull/1890) -- Support greatsql auditlog config. [#1893](https://github.com/apecloud/kubeblocks-addons/pull/1893) +- Support greatsql. ([#1793](https://github.com/apecloud/kubeblocks-addons/pull/1793)) +- Support mysql audit log config. ([#1890](https://github.com/apecloud/kubeblocks-addons/pull/1890)) +- Support greatsql auditlog config. ([#1893](https://github.com/apecloud/kubeblocks-addons/pull/1893)) ### PostgreSQL -- Refactor PostgreSQL addon and support PostgreSQL 16 and 17. [#1734](https://github.com/apecloud/kubeblocks-addons/pull/1734) -- Support using etcd as DCS for PostgreSQL. [#1864](https://github.com/apecloud/kubeblocks-addons/pull/1864) +- Refactor PostgreSQL addon and support PostgreSQL 16 and 17. ([#1734](https://github.com/apecloud/kubeblocks-addons/pull/1734)) +- Support using etcd as DCS for PostgreSQL. ([#1864](https://github.com/apecloud/kubeblocks-addons/pull/1864)) ### Redis -- Support redis 7.2.10 and redis 8 [#1812](https://github.com/apecloud/kubeblocks-addons/pull/1812) +- Support redis 7.2.10 and redis 8 ([#1812](https://github.com/apecloud/kubeblocks-addons/pull/1812)) - Fix Redis slave instance memory leak when loading redisgears module. - Change redis maxmemory-policy to volatile-lru and maxmemory = 0.8 * limit_memory. ### MongoDB -- Support mongodb sharding. 
[#1701](https://github.com/apecloud/kubeblocks-addons/pull/1701) +- Support mongodb sharding. ([#1701](https://github.com/apecloud/kubeblocks-addons/pull/1701)) ### Elasticsearch -- Optimize backup. [#1853](https://github.com/apecloud/kubeblocks-addons/pull/1853) -- Support new version 8.15.5. [#1929](https://github.com/apecloud/kubeblocks-addons/pull/1929) +- Optimize backup. ([#1853](https://github.com/apecloud/kubeblocks-addons/pull/1853)) +- Support new version 8.15.5. ([#1929](https://github.com/apecloud/kubeblocks-addons/pull/1929)) ### ClickHouse -- Clickhouse support backup and restore. [#1800](https://github.com/apecloud/kubeblocks-addons/pull/1800) -- Update metrics. [#1916](https://github.com/apecloud/kubeblocks-addons/pull/1916) +- Clickhouse support backup and restore. ([#1800](https://github.com/apecloud/kubeblocks-addons/pull/1800)) +- Update metrics. ([#1916](https://github.com/apecloud/kubeblocks-addons/pull/1916)) ### ZooKeeper -- Improve zookeeper jvm setting and gc option. [#1771](https://github.com/apecloud/kubeblocks-addons/pull/1771) [#1938](https://github.com/apecloud/kubeblocks-addons/pull/1938) +- Improve zookeeper jvm setting and gc option. ([#1771](https://github.com/apecloud/kubeblocks-addons/pull/1771), [#1938](https://github.com/apecloud/kubeblocks-addons/pull/1938)) ### etcd -- Support etcd v3.6.1. [#1737](https://github.com/apecloud/kubeblocks-addons/pull/1737) -- Improve etcd backup procedure. [#1740](https://github.com/apecloud/kubeblocks-addons/pull/1740) -- Improve etcd params and configuration. [#1778](https://github.com/apecloud/kubeblocks-addons/pull/1778) +- Support etcd v3.6.1. ([#1737](https://github.com/apecloud/kubeblocks-addons/pull/1737)) +- Improve etcd backup procedure. ([#1740](https://github.com/apecloud/kubeblocks-addons/pull/1740)) +- Improve etcd params and configuration. ([#1778](https://github.com/apecloud/kubeblocks-addons/pull/1778)) ### Milvus -- Support to create Milvus clusters on arm. 
[#1792](https://github.com/apecloud/kubeblocks-addons/pull/1792) +- Support to create Milvus clusters on arm. ([#1792](https://github.com/apecloud/kubeblocks-addons/pull/1792)) ## Upgrade to v0.9.5 diff --git a/docs/en/release-1_0/user_docs/release_notes/release-09/095.mdx b/docs/en/release-1_0/user_docs/release_notes/release-09/095.mdx index 02e0f6d7..7a3e717a 100644 --- a/docs/en/release-1_0/user_docs/release_notes/release-09/095.mdx +++ b/docs/en/release-1_0/user_docs/release_notes/release-09/095.mdx @@ -12,49 +12,49 @@ We are delighted to announce the release of KubeBlocks v0.9.5. This release incl ### Features -- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. [#9595](https://github.com/apecloud/kubeblocks/pull/9595) -- **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. [#9489](https://github.com/apecloud/kubeblocks/pull/9489) -- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. [#9607](https://github.com/apecloud/kubeblocks/pull/9607) -- **Support resize subresource in InPlacePodVerticalScaling** - Improves resource resizing handling for K8s v1.32 and above. [#9536](https://github.com/apecloud/kubeblocks/pull/9536) [#9545](https://github.com/apecloud/kubeblocks/pull/9545) -- **Add azureblob storage provider** - Adds Azure Blob storage provider support. [#9492](https://github.com/apecloud/kubeblocks/pull/9492) +- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. ([#9595](https://github.com/apecloud/kubeblocks/pull/9595)) +- **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. 
([#9489](https://github.com/apecloud/kubeblocks/pull/9489)) +- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. ([#9607](https://github.com/apecloud/kubeblocks/pull/9607)) +- **Support resize subresource in InPlacePodVerticalScaling** - Improves resource resizing handling for K8s v1.32 and above. ([#9536](https://github.com/apecloud/kubeblocks/pull/9536), [#9545](https://github.com/apecloud/kubeblocks/pull/9545)) +- **Add azureblob storage provider** - Adds Azure Blob storage provider support. ([#9492](https://github.com/apecloud/kubeblocks/pull/9492)) ## KubeBlocks Addons ### MySQL Variants (MySQL, GreatSQL) -- Support greatsql. [#1793](https://github.com/apecloud/kubeblocks-addons/pull/1793) -- Support mysql audit log config. [#1890](https://github.com/apecloud/kubeblocks-addons/pull/1890) -- Support greatsql auditlog config. [#1893](https://github.com/apecloud/kubeblocks-addons/pull/1893) +- Support greatsql. ([#1793](https://github.com/apecloud/kubeblocks-addons/pull/1793)) +- Support mysql audit log config. ([#1890](https://github.com/apecloud/kubeblocks-addons/pull/1890)) +- Support greatsql auditlog config. ([#1893](https://github.com/apecloud/kubeblocks-addons/pull/1893)) ### PostgreSQL -- Refactor PostgreSQL addon and support PostgreSQL 16 and 17. [#1734](https://github.com/apecloud/kubeblocks-addons/pull/1734) -- Support using etcd as DCS for PostgreSQL. [#1864](https://github.com/apecloud/kubeblocks-addons/pull/1864) +- Refactor PostgreSQL addon and support PostgreSQL 16 and 17. ([#1734](https://github.com/apecloud/kubeblocks-addons/pull/1734)) +- Support using etcd as DCS for PostgreSQL. 
([#1864](https://github.com/apecloud/kubeblocks-addons/pull/1864)) ### Redis -- Support redis 7.2.10 and redis 8 [#1812](https://github.com/apecloud/kubeblocks-addons/pull/1812) +- Support redis 7.2.10 and redis 8 ([#1812](https://github.com/apecloud/kubeblocks-addons/pull/1812)) - Fix Redis slave instance memory leak when loading redisgears module. - Change redis maxmemory-policy to volatile-lru and maxmemory = 0.8 * limit_memory. ### MongoDB -- Support mongodb sharding. [#1701](https://github.com/apecloud/kubeblocks-addons/pull/1701) +- Support mongodb sharding. ([#1701](https://github.com/apecloud/kubeblocks-addons/pull/1701)) ### Elasticsearch -- Optimize backup. [#1853](https://github.com/apecloud/kubeblocks-addons/pull/1853) -- Support new version 8.15.5. [#1929](https://github.com/apecloud/kubeblocks-addons/pull/1929) +- Optimize backup. ([#1853](https://github.com/apecloud/kubeblocks-addons/pull/1853)) +- Support new version 8.15.5. ([#1929](https://github.com/apecloud/kubeblocks-addons/pull/1929)) ### ClickHouse -- Clickhouse support backup and restore. [#1800](https://github.com/apecloud/kubeblocks-addons/pull/1800) -- Update metrics. [#1916](https://github.com/apecloud/kubeblocks-addons/pull/1916) +- Clickhouse support backup and restore. ([#1800](https://github.com/apecloud/kubeblocks-addons/pull/1800)) +- Update metrics. ([#1916](https://github.com/apecloud/kubeblocks-addons/pull/1916)) ### ZooKeeper -- Improve zookeeper jvm setting and gc option. [#1771](https://github.com/apecloud/kubeblocks-addons/pull/1771) [#1938](https://github.com/apecloud/kubeblocks-addons/pull/1938) +- Improve zookeeper jvm setting and gc option. ([#1771](https://github.com/apecloud/kubeblocks-addons/pull/1771), [#1938](https://github.com/apecloud/kubeblocks-addons/pull/1938)) ### etcd -- Support etcd v3.6.1. [#1737](https://github.com/apecloud/kubeblocks-addons/pull/1737) -- Improve etcd backup procedure. 
[#1740](https://github.com/apecloud/kubeblocks-addons/pull/1740) -- Improve etcd params and configuration. [#1778](https://github.com/apecloud/kubeblocks-addons/pull/1778) +- Support etcd v3.6.1. ([#1737](https://github.com/apecloud/kubeblocks-addons/pull/1737)) +- Improve etcd backup procedure. ([#1740](https://github.com/apecloud/kubeblocks-addons/pull/1740)) +- Improve etcd params and configuration. ([#1778](https://github.com/apecloud/kubeblocks-addons/pull/1778)) ### Milvus -- Support to create Milvus clusters on arm. [#1792](https://github.com/apecloud/kubeblocks-addons/pull/1792) +- Support to create Milvus clusters on arm. ([#1792](https://github.com/apecloud/kubeblocks-addons/pull/1792)) ## Upgrade to v0.9.5 diff --git a/docs/en/release-1_0_1/user_docs/release_notes/release-09/095.mdx b/docs/en/release-1_0_1/user_docs/release_notes/release-09/095.mdx index 02e0f6d7..7a3e717a 100644 --- a/docs/en/release-1_0_1/user_docs/release_notes/release-09/095.mdx +++ b/docs/en/release-1_0_1/user_docs/release_notes/release-09/095.mdx @@ -12,49 +12,49 @@ We are delighted to announce the release of KubeBlocks v0.9.5. This release incl ### Features -- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. [#9595](https://github.com/apecloud/kubeblocks/pull/9595) -- **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. [#9489](https://github.com/apecloud/kubeblocks/pull/9489) -- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. [#9607](https://github.com/apecloud/kubeblocks/pull/9607) -- **Support resize subresource in InPlacePodVerticalScaling** - Improves resource resizing handling for K8s v1.32 and above. 
[#9536](https://github.com/apecloud/kubeblocks/pull/9536) [#9545](https://github.com/apecloud/kubeblocks/pull/9545) -- **Add azureblob storage provider** - Adds Azure Blob storage provider support. [#9492](https://github.com/apecloud/kubeblocks/pull/9492) +- **Improve the settings of the manager** - Improves performance, stability, and configurability of KubeBlocks Operator. ([#9595](https://github.com/apecloud/kubeblocks/pull/9595)) +- **Specify allowed symbols in password generation** - Allows customization of symbols used in generated passwords. ([#9489](https://github.com/apecloud/kubeblocks/pull/9489)) +- **OpsDefinition supports image mapping for multi service version** - Enables image mapping for different service versions in operations definitions. ([#9607](https://github.com/apecloud/kubeblocks/pull/9607)) +- **Support resize subresource in InPlacePodVerticalScaling** - Improves resource resizing handling for K8s v1.32 and above. ([#9536](https://github.com/apecloud/kubeblocks/pull/9536), [#9545](https://github.com/apecloud/kubeblocks/pull/9545)) +- **Add azureblob storage provider** - Adds Azure Blob storage provider support. ([#9492](https://github.com/apecloud/kubeblocks/pull/9492)) ## KubeBlocks Addons ### MySQL Variants (MySQL, GreatSQL) -- Support greatsql. [#1793](https://github.com/apecloud/kubeblocks-addons/pull/1793) -- Support mysql audit log config. [#1890](https://github.com/apecloud/kubeblocks-addons/pull/1890) -- Support greatsql auditlog config. [#1893](https://github.com/apecloud/kubeblocks-addons/pull/1893) +- Support greatsql. ([#1793](https://github.com/apecloud/kubeblocks-addons/pull/1793)) +- Support mysql audit log config. ([#1890](https://github.com/apecloud/kubeblocks-addons/pull/1890)) +- Support greatsql auditlog config. ([#1893](https://github.com/apecloud/kubeblocks-addons/pull/1893)) ### PostgreSQL -- Refactor PostgreSQL addon and support PostgreSQL 16 and 17. 
[#1734](https://github.com/apecloud/kubeblocks-addons/pull/1734) -- Support using etcd as DCS for PostgreSQL. [#1864](https://github.com/apecloud/kubeblocks-addons/pull/1864) +- Refactor PostgreSQL addon and support PostgreSQL 16 and 17. ([#1734](https://github.com/apecloud/kubeblocks-addons/pull/1734)) +- Support using etcd as DCS for PostgreSQL. ([#1864](https://github.com/apecloud/kubeblocks-addons/pull/1864)) ### Redis -- Support redis 7.2.10 and redis 8 [#1812](https://github.com/apecloud/kubeblocks-addons/pull/1812) +- Support redis 7.2.10 and redis 8 ([#1812](https://github.com/apecloud/kubeblocks-addons/pull/1812)) - Fix Redis slave instance memory leak when loading redisgears module. - Change redis maxmemory-policy to volatile-lru and maxmemory = 0.8 * limit_memory. ### MongoDB -- Support mongodb sharding. [#1701](https://github.com/apecloud/kubeblocks-addons/pull/1701) +- Support mongodb sharding. ([#1701](https://github.com/apecloud/kubeblocks-addons/pull/1701)) ### Elasticsearch -- Optimize backup. [#1853](https://github.com/apecloud/kubeblocks-addons/pull/1853) -- Support new version 8.15.5. [#1929](https://github.com/apecloud/kubeblocks-addons/pull/1929) +- Optimize backup. ([#1853](https://github.com/apecloud/kubeblocks-addons/pull/1853)) +- Support new version 8.15.5. ([#1929](https://github.com/apecloud/kubeblocks-addons/pull/1929)) ### ClickHouse -- Clickhouse support backup and restore. [#1800](https://github.com/apecloud/kubeblocks-addons/pull/1800) -- Update metrics. [#1916](https://github.com/apecloud/kubeblocks-addons/pull/1916) +- Clickhouse support backup and restore. ([#1800](https://github.com/apecloud/kubeblocks-addons/pull/1800)) +- Update metrics. ([#1916](https://github.com/apecloud/kubeblocks-addons/pull/1916)) ### ZooKeeper -- Improve zookeeper jvm setting and gc option. 
[#1771](https://github.com/apecloud/kubeblocks-addons/pull/1771) [#1938](https://github.com/apecloud/kubeblocks-addons/pull/1938) +- Improve zookeeper jvm setting and gc option. ([#1771](https://github.com/apecloud/kubeblocks-addons/pull/1771), [#1938](https://github.com/apecloud/kubeblocks-addons/pull/1938)) ### etcd -- Support etcd v3.6.1. [#1737](https://github.com/apecloud/kubeblocks-addons/pull/1737) -- Improve etcd backup procedure. [#1740](https://github.com/apecloud/kubeblocks-addons/pull/1740) -- Improve etcd params and configuration. [#1778](https://github.com/apecloud/kubeblocks-addons/pull/1778) +- Support etcd v3.6.1. ([#1737](https://github.com/apecloud/kubeblocks-addons/pull/1737)) +- Improve etcd backup procedure. ([#1740](https://github.com/apecloud/kubeblocks-addons/pull/1740)) +- Improve etcd params and configuration. ([#1778](https://github.com/apecloud/kubeblocks-addons/pull/1778)) ### Milvus -- Support to create Milvus clusters on arm. [#1792](https://github.com/apecloud/kubeblocks-addons/pull/1792) +- Support to create Milvus clusters on arm. ([#1792](https://github.com/apecloud/kubeblocks-addons/pull/1792)) ## Upgrade to v0.9.5