89 changes: 89 additions & 0 deletions default-cluster-templates/baseline-kubeadm.json
@@ -0,0 +1,89 @@
{
"name": "baseline-kubeadm",
"version": "v2.0.1",
"kubernetesVersion": "v1.30.10",
"description": "Baseline Cluster Template with Kubeadm Control Plane",
"controlplaneprovidertype": "kubeadm",
"infraprovidertype": "intel",
"clusterconfiguration": {
"kind": "KubeadmControlPlaneTemplate",
"apiVersion": "controlplane.cluster.x-k8s.io/v1beta1",
"metadata": {
"labels": {
"cpumanager": "true"
}
},
"spec": {
"template": {
"spec": {
"files": [
{
"path": "/usr/local/bin/append-containerd-config.sh",
"content": "cat <<EOF >> /etc/containerd/config.toml\n\n[plugins.\\\"io.containerd.internal.v1.opt\\\"]\n path = \\\"/var/lib/rancher/rke2/agent/containerd\\\"\n\n[plugins.\\\"io.containerd.grpc.v1.cri\\\"]\n stream_server_address = \\\"127.0.0.1\\\"\n stream_server_port = \\\"10010\\\"\n enable_selinux = false\n enable_unprivileged_ports = true\n enable_unprivileged_icmp = true\n sandbox_image = \\\"index.docker.io/rancher/mirrored-pause:3.6\\\"\n disable_apparmor = true\n\n[plugins.\\\"io.containerd.grpc.v1.cri\\\".containerd]\n snapshotter = \\\"overlayfs\\\"\n disable_snapshot_annotations = true\n\n[plugins.\\\"io.containerd.grpc.v1.cri\\\".containerd.runtimes.runc]\n runtime_type = \\\"io.containerd.runc.v2\\\"\n\n[plugins.\\\"io.containerd.grpc.v1.cri\\\".containerd.runtimes.runc.options]\n SystemdCgroup = true\n\n[plugins.\\\"io.containerd.grpc.v1.cri\\\".registry]\n config_path = \\\"/var/lib/rancher/rke2/agent/etc/containerd/certs.d\\\"\n\n[plugins.\\\"io.containerd.grpc.v1.cri\\\".containerd.runtimes.kata-qemu]\n runtime_type = \\\"io.containerd.kata-qemu.v2\\\"\n runtime_path = \\\"/opt/kata/bin/containerd-shim-kata-v2\\\"\n privileged_without_host_devices = true\n pod_annotations = [\\\"io.katacontainers.*\\\"]\n\n[plugins.\\\"io.containerd.grpc.v1.cri\\\".containerd.runtimes.kata-qemu.options]\n ConfigPath = \\\"/opt/kata/share/defaults/kata-containers/configuration-qemu.toml\\\"\n\nEOF"
}
],
"clusterConfiguration": {
"apiServer": {
"extraArgs": {
"feature-gates": "PortForwardWebsockets=true",
"tls-cipher-suites":"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
}
},
"controllerManager":{
"extraArgs": {}
},
"etcd":{
"local": {
"extraArgs": {
"cipher-suites": "[TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384]"
}
}
},
"scheduler":{
"extraArgs": {}
}
},
"joinConfiguration": {
"nodeRegistration": {
"kubeletExtraArgs": {
"topology-manager-policy": "best-effort",
"cpu-manager-policy": "static",
"reserved-cpus": "1",
"max-pods": "250",
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
}
}
},
"preKubeadmCommands": [
"/usr/local/bin/append-containerd-config.sh"
],
"postKubeadmCommands": [
"kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico.yaml"
],
"nodeDrainTimeout": "2m",
"rolloutStrategy": {
"type": "RollingUpdate",
"rollingUpdate": {
"maxSurge": 1
}
}
}
}
}
},
"clusterNetwork": {
"pods": {
"cidrBlocks": [
"10.42.0.0/16"
]
},
"services": {
"cidrBlocks": [
"10.43.0.0/16"
]
}
},
"cluster-labels": {
"default-extension": "baseline"
}
}
47 changes: 47 additions & 0 deletions default-cluster-templates/kubeadm.config.toml
@@ -0,0 +1,47 @@
# This is the containerd configuration script embedded in the default templates' spec.files.content field.
# To update the cluster template, make sure you have the following tools installed in your environment:
# - grep (https://www.gnu.org/software/grep/)
# - jo (https://github.com/jpmens/jo)
# - jq (https://jqlang.org/)
# - sed (https://www.gnu.org/software/sed/)
# The string for the template can be generated from this file with the following command:
# jo content="$(grep -v '^#' kubeadm.config.toml)"|sed 's/\\"/\\\\\\\"/g'|jq .content
# The result can then be pasted into the template files as the "content" value.
# The sed command turns each \" produced by jo into \\\", matching the triple-backslashed
# quotes used in the existing template files; this extra escaping appears to be required.
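# For example, the TOML line
#   snapshotter = "overlayfs"
# ends up in the template's "content" string as
#   snapshotter = \\\"overlayfs\\\"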
cat <<EOF >> /etc/containerd/config.toml

[plugins."io.containerd.internal.v1.opt"]
path = "/var/lib/rancher/rke2/agent/containerd"

[plugins."io.containerd.grpc.v1.cri"]
stream_server_address = "127.0.0.1"
stream_server_port = "10010"
enable_selinux = false
enable_unprivileged_ports = true
enable_unprivileged_icmp = true
sandbox_image = "index.docker.io/rancher/mirrored-pause:3.6"
disable_apparmor = true

[plugins."io.containerd.grpc.v1.cri".containerd]
snapshotter = "overlayfs"
disable_snapshot_annotations = true

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true

[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/var/lib/rancher/rke2/agent/etc/containerd/certs.d"

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-qemu]
runtime_type = "io.containerd.kata-qemu.v2"
runtime_path = "/opt/kata/bin/containerd-shim-kata-v2"
privileged_without_host_devices = true
pod_annotations = ["io.katacontainers.*"]

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-qemu.options]
ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"

EOF
110 changes: 110 additions & 0 deletions internal/controller/clustertemplate_controller_test.go
@@ -188,3 +188,113 @@ var _ = Describe("ClusterTemplate Controller", func() {
),
)
})

var _ = Describe("ClusterTemplate Controller for Kubeadm CP and Intel Infra", func() {
Context("When reconciling a resource", func() {
const resourceName = "test-resource"

ctx := context.Background()

typeNamespacedName := types.NamespacedName{
Name: resourceName,
Namespace: "default", // TODO(user): Modify as needed
}
clustertemplate := &clusterv1alpha1.ClusterTemplate{}

BeforeEach(func() {
By("creating the custom resource for the Kind ClusterTemplate")
err := k8sClient.Get(ctx, typeNamespacedName, clustertemplate)
if err != nil && errors.IsNotFound(err) {
resource := &clusterv1alpha1.ClusterTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: resourceName,
Namespace: "default",
},
Spec: clusterv1alpha1.ClusterTemplateSpec{
ControlPlaneProviderType: "kubeadm",
InfraProviderType: "intel",
KubernetesVersion: "v1.30.10",
ClusterConfiguration: "{\"apiVersion\":\"controlplane.cluster.x-k8s.io/v1beta1\",\"kind\":\"KubeadmControlPlaneTemplate\",\"metadata\":{\"name\":\"kubeadm-control-plane-template-v0.1.0\"},\"spec\":{\"template\":{\"spec\":{\"kubeadmConfigSpec\":{\"clusterConfiguration\":{\"apiServer\":{\"certSANs\":[\"localhost\",\"127.0.0.1\",\"0.0.0.0\",\"host.docker.internal\"]}},\"initConfiguration\":{\"nodeRegistration\":{}},\"joinConfiguration\":{\"nodeRegistration\":{}},\"postKubeadmCommands\":[\"kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico.yaml\"]}}}}}",
ClusterNetwork: clusterv1alpha1.ClusterNetwork{
Services: &clusterv1alpha1.NetworkRanges{
CIDRBlocks: []string{"10.43.0.0/16"},
},
Pods: &clusterv1alpha1.NetworkRanges{
CIDRBlocks: []string{"10.42.0.0/16"},
},
},
ClusterLabels: map[string]string{
"default-extension": "privileged",
},
},
}
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
}
})

AfterEach(func() {
// TODO(user): Cleanup logic after each test, like removing the resource instance.
resource := &clusterv1alpha1.ClusterTemplate{}
err := k8sClient.Get(ctx, typeNamespacedName, resource)
Expect(err).NotTo(HaveOccurred())

By("validating the finalizer is present")
Expect(controllerutil.ContainsFinalizer(resource, clusterv1alpha1.ClusterTemplateFinalizer)).To(BeTrue())

By("Cleanup the specific resource instance ClusterTemplate")
Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
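// Deleting only marks the ClusterTemplate for deletion while the finalizer is set;
// the reconcile below is expected to remove the finalizer so the object can go away.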

By("Reconciling the deleted resource")
controllerReconciler := &ClusterTemplateReconciler{
Client: k8sClient,
Scheme: k8sClient.Scheme(),
}

_, err = controllerReconciler.Reconcile(ctx, reconcile.Request{
NamespacedName: typeNamespacedName,
})
Expect(err).NotTo(HaveOccurred())

By("validating the finalizer is removed")
err = k8sClient.Get(ctx, typeNamespacedName, resource)
if err == nil {
Expect(controllerutil.ContainsFinalizer(resource, clusterv1alpha1.ClusterTemplateFinalizer)).To(BeFalse())
}
})

It("should successfully reconcile the resource", func() {
By("Reconciling the created resource")
controllerReconciler := &ClusterTemplateReconciler{
Client: k8sClient,
Scheme: k8sClient.Scheme(),
}

_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
NamespacedName: typeNamespacedName,
})
Expect(err).NotTo(HaveOccurred())

Expect(k8sClient.Get(ctx, typeNamespacedName, clustertemplate)).To(Succeed())
Expect(clustertemplate.Status.Ready).To(BeTrue())

By("validating the IntelMachineTemplate is created")
err = k8sClient.Get(ctx, types.NamespacedName{
Name: fmt.Sprintf("%s-controlplane", typeNamespacedName.Name),
Namespace: typeNamespacedName.Namespace,
}, &intelv1alpha1.IntelMachineTemplate{})
Expect(err).NotTo(HaveOccurred())

By("validating the IntelClusterTemplate is created")
err = k8sClient.Get(ctx, typeNamespacedName, &intelv1alpha1.IntelClusterTemplate{})
Expect(err).NotTo(HaveOccurred())

By("validating the KubeadmControlPlaneTemplate is created")
err = k8sClient.Get(ctx, typeNamespacedName, &kubeadmcpv1beta1.KubeadmControlPlaneTemplate{})
Expect(err).NotTo(HaveOccurred())

By("validating the ClusterClass is created")
err = k8sClient.Get(ctx, typeNamespacedName, &capiv1beta1.ClusterClass{})
Expect(err).NotTo(HaveOccurred())
})
})
})