8 changes: 4 additions & 4 deletions deployment/charts/cluster-manager/Chart.yaml
@@ -16,8 +16,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 2.2.4-dev
-appVersion: 2.2.4-dev
+version: 2.2.4-dev-44608293
+appVersion: 2.2.4-dev-44608293
annotations:
-revision: ba0ca8920f3cdecf780d3bf2e73d1960a3d36bb5
-created: "2025-10-22T14:43:03Z"
+revision: 446082930be7fa62ff5260577ddf8ab9a84fa78d
+created: "2025-11-04T23:52:19Z"
8 changes: 4 additions & 4 deletions deployment/charts/cluster-template-crd/Chart.yaml
@@ -6,8 +6,8 @@ apiVersion: v2
name: cluster-template-crd
description: A Helm chart for the ClusterTemplate CRD
type: application
-version: 2.2.4-dev
-appVersion: 2.2.4-dev
+version: 2.2.4-dev-44608293
+appVersion: 2.2.4-dev-44608293
annotations:
-revision: ba0ca8920f3cdecf780d3bf2e73d1960a3d36bb5
-created: "2025-10-22T14:43:03Z"
+revision: 446082930be7fa62ff5260577ddf8ab9a84fa78d
+created: "2025-11-04T23:52:19Z"
2 changes: 1 addition & 1 deletion internal/rest/getv2clusters_test.go
@@ -165,7 +165,7 @@ func generateClusterInfo(name, version string, lifecycleIndicator api.StatusIndi
LifecyclePhase: &api.GenericStatus{Indicator: statusIndicatorPtr(lifecycleIndicator), Message: ptr(lifecycleMessage), Timestamp: ptr(uint64(0))},
ControlPlaneReady: &api.GenericStatus{Indicator: statusIndicatorPtr(lifecycleIndicator), Message: ptr("condition not found"), Timestamp: ptr(uint64(0))},
InfrastructureReady: &api.GenericStatus{Indicator: statusIndicatorPtr(lifecycleIndicator), Message: ptr("condition not found"), Timestamp: ptr(uint64(0))},
-NodeHealth: &api.GenericStatus{Indicator: statusIndicatorPtr(api.STATUSINDICATIONIDLE), Message: ptr("nodes are healthy"), Timestamp: ptr(uint64(0))},
+NodeHealth: &api.GenericStatus{Indicator: statusIndicatorPtr(api.STATUSINDICATIONUNSPECIFIED), Message: ptr("condition not found"), Timestamp: ptr(uint64(0))},
NodeQuantity: ptr(0),
ProviderStatus: &api.GenericStatus{Indicator: statusIndicatorPtr(api.STATUSINDICATIONUNSPECIFIED), Message: ptr("condition not found"), Timestamp: ptr(uint64(0))},
}
51 changes: 32 additions & 19 deletions internal/rest/getv2clusterssummary_test.go
@@ -83,14 +83,40 @@ var clusterStatusUnknown = capi.ClusterStatus{

func createMockServer(t *testing.T, clusters []capi.Cluster, projectID string, options ...bool) *rest.Server {
unstructuredClusters := make([]unstructured.Unstructured, len(clusters))
+unstructuredMachines := make([]unstructured.Unstructured, len(clusters))
for i, cluster := range clusters {
unstructuredCluster, err := convert.ToUnstructured(cluster)
require.NoError(t, err, "convertClusterToUnstructured() error = %v, want nil")
unstructuredClusters[i] = *unstructuredCluster
+
+machine := capi.Machine{
+ObjectMeta: metav1.ObjectMeta{
+Name: cluster.Name,
+Labels: map[string]string{
+"cluster.x-k8s.io/cluster-name": cluster.Name,
+},
+},
+Spec: capi.MachineSpec{ClusterName: cluster.Name},
+Status: capi.MachineStatus{
+Phase: "Running",
+Conditions: []capi.Condition{
+{Type: "HealthCheckSucceed", Status: "True"},
+{Type: "InfrastructureReady", Status: "True"},
+{Type: "NodeHealthy", Status: "True"},
+},
+},
+}
+
+unstructuredMachine, err := convert.ToUnstructured(machine)
+require.NoError(t, err, "convertClusterToUnstructured() error = %v, want nil")
+unstructuredMachines[i] = *unstructuredMachine
}
unstructuredClusterList := &unstructured.UnstructuredList{
Items: unstructuredClusters,
}
+unstructuredMachineList := &unstructured.UnstructuredList{
+Items: unstructuredMachines,
+}
// default is to set up k8s client and machineResource mocks
setupK8sMocks := true
mockMachineResource := true
@@ -101,31 +127,18 @@ func createMockServer(t *testing.T, clusters []capi.Cluster, projectID string, o
var mockedk8sclient *k8s.MockInterface
mockedk8sclient = k8s.NewMockInterface(t)
if setupK8sMocks {
-machine := capi.Machine{
-Status: capi.MachineStatus{
-Phase: "Running",
-Conditions: []capi.Condition{
-{Type: "HealthCheckSucceed", Status: "True"},
-{Type: "InfrastructureReady", Status: "True"},
-{Type: "NodeHealthy", Status: "True"},
-},
-},
-}
-unstructuredMachine, err := convert.ToUnstructured(machine)
-require.NoError(t, err, "convertMachineToUnstructured() error = %v, want nil")
resource := k8s.NewMockResourceInterface(t)
resource.EXPECT().List(mock.Anything, metav1.ListOptions{}).Return(unstructuredClusterList, nil)
nsResource := k8s.NewMockNamespaceableResourceInterface(t)
nsResource.EXPECT().Namespace(projectID).Return(resource)
mockedk8sclient = k8s.NewMockInterface(t)
mockedk8sclient.EXPECT().Resource(core.ClusterResourceSchema).Return(nsResource)
if mockMachineResource {
-for _, cluster := range clusters {
-resource.EXPECT().List(mock.Anything, metav1.ListOptions{
-LabelSelector: "cluster.x-k8s.io/cluster-name=" + cluster.Name,
-}).Return(&unstructured.UnstructuredList{Items: []unstructured.Unstructured{*unstructuredMachine}}, nil).Maybe()
-}
-mockedk8sclient.EXPECT().Resource(core.MachineResourceSchema).Return(nsResource).Maybe()
+machineResource := k8s.NewMockResourceInterface(t)
+machineResource.EXPECT().List(mock.Anything, metav1.ListOptions{}).Return(unstructuredMachineList, nil)
+nsMachineResource := k8s.NewMockNamespaceableResourceInterface(t)
+nsMachineResource.EXPECT().Namespace(projectID).Return(machineResource)
+mockedk8sclient.EXPECT().Resource(core.MachineResourceSchema).Return(nsMachineResource).Maybe()
}
}
return rest.NewServer(mockedk8sclient)
@@ -142,7 +155,7 @@ func generateClusterWithStatus(name, version *string, status capi.ClusterStatus)
}
return capi.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: clusterName},
-Spec: capi.ClusterSpec{Topology: &capi.Topology{Version: clusterVersion}},
+Spec: capi.ClusterSpec{Paused: false, Topology: &capi.Topology{Version: clusterVersion}},
Status: status,
}
}
1 change: 1 addition & 0 deletions internal/rest/postv2clusters.go
@@ -186,6 +186,7 @@ func (s *Server) createCluster(ctx context.Context, cli *k8s.Client, namespace,
},
Variables: variables,
},
+Paused: true,
},
}

4 changes: 4 additions & 0 deletions internal/rest/postv2clusters_test.go
@@ -56,6 +56,7 @@ func TestPostV2Clusters201(t *testing.T) {
},
},
Spec: capi.ClusterSpec{
+Paused: true,
ClusterNetwork: &capi.ClusterNetwork{
Pods: &capi.NetworkRanges{
CIDRBlocks: []string{"10.0.0.0/16"},
@@ -196,6 +197,7 @@
},
},
Spec: capi.ClusterSpec{
+Paused: true,
ClusterNetwork: &capi.ClusterNetwork{
Pods: &capi.NetworkRanges{
CIDRBlocks: []string{"10.0.0.0/16"},
@@ -360,6 +362,7 @@ func TestPostV2Clusters201K3sAirGap(t *testing.T) {
},
},
Spec: capi.ClusterSpec{
+Paused: true,
ClusterNetwork: &capi.ClusterNetwork{
Pods: &capi.NetworkRanges{
CIDRBlocks: []string{"10.0.0.0/16"},
@@ -851,6 +854,7 @@ func TestPostV2Clusters500(t *testing.T) {
},
},
Spec: capi.ClusterSpec{
+Paused: true,
ClusterNetwork: &capi.ClusterNetwork{
Pods: &capi.NetworkRanges{
CIDRBlocks: []string{"10.0.0.0/16"},
28 changes: 20 additions & 8 deletions internal/rest/utils.go
@@ -162,13 +162,22 @@ func getClusterLifecyclePhase(cluster *capi.Cluster) (*api.GenericStatus, []erro
Message: new(string),
Timestamp: new(uint64),
}
+
var errorReasons []error
if len(cluster.Status.Conditions) == 0 {
*status.Indicator = api.STATUSINDICATIONUNSPECIFIED
*status.Message = "Condition not found"
*status.Timestamp = 0
return &status, errorReasons
}
+
+*status.Timestamp = uint64(cluster.Status.Conditions[0].LastTransitionTime.UTC().Unix())
+
+if cluster.Spec.Paused {
+*status.Indicator = api.STATUSINDICATIONIDLE
+*status.Message = "waiting for nodes"
+return &status, errorReasons
+}
// ClusterPhase is a string representation of a Cluster Phase.
// It is a high-level indicator of the status of the Cluster
// as it is provisioned, from the API user’s perspective.
@@ -212,7 +221,6 @@ func getClusterLifecyclePhase(cluster *capi.Cluster) (*api.GenericStatus, []erro
return nil, errorReasons
}

-*status.Timestamp = uint64(cluster.Status.Conditions[0].LastTransitionTime.UTC().Unix())
return &status, errorReasons
}

@@ -279,7 +287,17 @@ func getNodeHealth(cluster *capi.Cluster, machines []unstructured.Unstructured)
}
}

-if inProgress {
+if len(cluster.Status.Conditions) > 0 {
+*status.Timestamp = uint64(cluster.Status.Conditions[0].LastTransitionTime.UTC().Unix())
+} else {
+*status.Timestamp = 0
+}
+
+if totalMachines == 0 {
+*status.Indicator = api.STATUSINDICATIONUNSPECIFIED
+*status.Message = "condition not found"
+*status.Timestamp = 0
+} else if inProgress {
*status.Indicator = api.STATUSINDICATIONINPROGRESS
*status.Message = fmt.Sprintf("node(s) health unknown (%v/%v);%s", machinesRunning, totalMachines, messages)
} else if allHealthy && machinesRunning == totalMachines {
@@ -290,12 +308,6 @@
*status.Message = fmt.Sprintf("nodes are unhealthy (%v/%v);%s", machinesRunning, totalMachines, machineMessage)
}

-if len(cluster.Status.Conditions) > 0 {
-*status.Timestamp = uint64(cluster.Status.Conditions[0].LastTransitionTime.UTC().Unix())
-} else {
-*status.Timestamp = 0
-}
-
return status
}

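Taken together, the postv2clusters.go and utils.go hunks change how a newly created (paused) cluster is reported: clusters are now created with Paused: true, a paused cluster's lifecycle phase short-circuits to an idle "waiting for nodes" status, and node health with zero machines reads as "condition not found". A minimal standalone Go sketch of that reporting logic follows. It is not code from this PR; the simplified types and string indicators below are assumptions standing in for the real capi/api types used in the diff.

```go
package main

import "fmt"

// Simplified stand-in for the capi.Cluster fields the diff touches
// (assumption: the real code works on *capi.Cluster and api.GenericStatus).
type cluster struct {
	paused        bool // capi.ClusterSpec.Paused, now set to true on creation
	hasConditions bool // len(cluster.Status.Conditions) > 0
}

// lifecyclePhase mirrors the new ordering in getClusterLifecyclePhase:
// missing conditions are reported first, then a paused cluster returns an
// idle "waiting for nodes" status before any phase mapping runs.
func lifecyclePhase(c cluster) (indicator, message string) {
	if !c.hasConditions {
		return "UNSPECIFIED", "Condition not found"
	}
	if c.paused {
		return "IDLE", "waiting for nodes"
	}
	// Otherwise the status is mapped from cluster.Status.Phase as before
	// (that mapping is collapsed in the diff and unchanged here).
	return "mapped-from-phase", "phase-specific message"
}

// nodeHealth mirrors the new guard in getNodeHealth: a cluster with no
// machines now reports "condition not found" instead of a healthy state,
// which is what the updated getv2clusters_test.go expectation asserts.
func nodeHealth(totalMachines, machinesRunning int, inProgress bool) (indicator, message string) {
	if totalMachines == 0 {
		return "UNSPECIFIED", "condition not found"
	}
	if inProgress {
		return "IN_PROGRESS", fmt.Sprintf("node(s) health unknown (%v/%v)", machinesRunning, totalMachines)
	}
	// Remaining healthy/unhealthy branches are collapsed in the diff; the
	// healthy case shown here matches the previous test expectation.
	return "IDLE", "nodes are healthy"
}

func main() {
	fmt.Println(lifecyclePhase(cluster{paused: true, hasConditions: true})) // IDLE waiting for nodes
	fmt.Println(nodeHealth(0, 0, false))                                    // UNSPECIFIED condition not found
}
```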