Skip to content

Commit 5760238

Browse files
committed
fix test target
1 parent 627ad3c commit 5760238

File tree

3 files changed

+124
-17
lines changed

3 files changed

+124
-17
lines changed

test/config/kind-config.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,6 @@ nodes:
3131
# Configure networking
3232
networking:
3333
apiServerAddress: "127.0.0.1"
34-
apiServerPort: 6443
34+
apiServerPort: 6444
3535
podSubnet: "10.244.0.0/16"
3636
serviceSubnet: "10.96.0.0/16"

test/scripts/cleanup-test-env.sh

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,12 @@ NC='\033[0m' # No Color
1212

1313
echo -e "${YELLOW}Cleaning up component test environment...${NC}"
1414

15-
CLUSTER_NAME=${KIND_CLUSTER_NAME:-"tenant-controller-test"}
15+
# Use the same logic as setup script for cluster naming
16+
if [ -n "${CI:-}" ] || [ -n "${GITHUB_ACTIONS:-}" ]; then
17+
CLUSTER_NAME=${KIND_CLUSTER_NAME:-"tenant-controller-test-${GITHUB_RUN_ID:-$$}"}
18+
else
19+
CLUSTER_NAME=${KIND_CLUSTER_NAME:-"tenant-controller-test"}
20+
fi
1621

1722
# Only delete the specific test cluster, not any existing clusters
1823
if kind get clusters | grep -q "^${CLUSTER_NAME}$"; then
@@ -27,6 +32,12 @@ echo -e "${YELLOW}Cleaning up any remaining processes...${NC}"
2732
pkill -f "kind.*${CLUSTER_NAME}" || true
2833
pkill -f "kubectl.*port-forward" || true
2934

35+
# Clean up temporary config file
36+
if [ -f "/tmp/kind-config-${CLUSTER_NAME}.yaml" ]; then
37+
echo -e "${YELLOW}Removing temporary KIND config file${NC}"
38+
rm -f "/tmp/kind-config-${CLUSTER_NAME}.yaml"
39+
fi
40+
3041
# Restore original kubectl context
3142
if [ -f /tmp/original-kubectl-context ]; then
3243
ORIGINAL_CONTEXT=$(cat /tmp/original-kubectl-context)

test/scripts/setup-test-env.sh

Lines changed: 111 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -39,33 +39,124 @@ else
3939
fi
4040

4141
# Check if our test cluster already exists
42-
CLUSTER_NAME=${KIND_CLUSTER_NAME:-"tenant-controller-test"}
42+
# Use a more unique name in CI environments
43+
if [ -n "${CI:-}" ] || [ -n "${GITHUB_ACTIONS:-}" ]; then
44+
CLUSTER_NAME=${KIND_CLUSTER_NAME:-"tenant-controller-test-${GITHUB_RUN_ID:-$$}"}
45+
else
46+
CLUSTER_NAME=${KIND_CLUSTER_NAME:-"tenant-controller-test"}
47+
fi
4348
CONFIG_FILE=${KIND_CONFIG_FILE:-"test/config/kind-config.yaml"}
4449

50+
# Function to find an available API server port
51+
find_available_port() {
52+
local start_port=6444
53+
local max_port=6500
54+
55+
for port in $(seq $start_port $max_port); do
56+
if ! netstat -tlnp 2>/dev/null | grep -q ":$port "; then
57+
echo "$port"
58+
return 0
59+
fi
60+
done
61+
62+
echo "6444" # fallback
63+
}
64+
65+
# In CI environments, dynamically assign API server port to avoid conflicts
66+
if [ -n "${CI:-}" ] || [ -n "${GITHUB_ACTIONS:-}" ]; then
67+
AVAILABLE_API_PORT=$(find_available_port)
68+
echo -e "${YELLOW}Using API server port: ${AVAILABLE_API_PORT}${NC}"
69+
70+
# Create a temporary config file with the available port
71+
TEMP_CONFIG="/tmp/kind-config-${CLUSTER_NAME}.yaml"
72+
if [ -f "$CONFIG_FILE" ]; then
73+
# Replace the apiServerPort in the config
74+
sed "s/apiServerPort: [0-9]*/apiServerPort: ${AVAILABLE_API_PORT}/" "$CONFIG_FILE" > "$TEMP_CONFIG"
75+
CONFIG_FILE="$TEMP_CONFIG"
76+
fi
77+
fi
78+
79+
# Function to create cluster with retry logic
80+
create_cluster() {
81+
local max_retries=3
82+
local retry=1
83+
84+
while [ $retry -le $max_retries ]; do
85+
echo -e "${YELLOW}Creating KIND cluster: ${CLUSTER_NAME} (attempt $retry/$max_retries)${NC}"
86+
87+
if [ -f "$CONFIG_FILE" ]; then
88+
if kind create cluster --name "$CLUSTER_NAME" --config "$CONFIG_FILE" --wait 300s; then
89+
echo -e "${GREEN}Successfully created cluster ${CLUSTER_NAME}${NC}"
90+
return 0
91+
fi
92+
else
93+
echo -e "${YELLOW}Config file $CONFIG_FILE not found, creating cluster with default settings${NC}"
94+
if kind create cluster --name "$CLUSTER_NAME" --wait 300s; then
95+
echo -e "${GREEN}Successfully created cluster ${CLUSTER_NAME}${NC}"
96+
return 0
97+
fi
98+
fi
99+
100+
echo -e "${RED}Failed to create cluster (attempt $retry/$max_retries)${NC}"
101+
102+
# If it's a port conflict, try to clean up existing clusters first
103+
if [ $retry -eq 1 ]; then
104+
echo -e "${YELLOW}Cleaning up any existing clusters that might cause port conflicts...${NC}"
105+
106+
# Show what's using common Kubernetes ports
107+
echo -e "${YELLOW}Checking port usage:${NC}"
108+
netstat -tlnp 2>/dev/null | grep -E ":6443|:6444" || true
109+
110+
# List all KIND clusters
111+
echo -e "${YELLOW}Current KIND clusters:${NC}"
112+
kind get clusters || true
113+
114+
# Clean up any existing test clusters
115+
for cluster in $(kind get clusters 2>/dev/null | grep -E "(tenant-controller|test)" || true); do
116+
echo -e "${YELLOW}Deleting potentially conflicting cluster: $cluster${NC}"
117+
kind delete cluster --name "$cluster" 2>/dev/null || true
118+
done
119+
120+
# Also try to clean up any docker containers that might be leftover
121+
echo -e "${YELLOW}Cleaning up any leftover KIND containers...${NC}"
122+
docker ps -a --filter="label=io.x-k8s.kind.cluster" --format="{{.Names}}" | while read container; do
123+
if [[ "$container" == *"tenant-controller"* ]] || [[ "$container" == *"test"* ]]; then
124+
echo -e "${YELLOW}Removing container: $container${NC}"
125+
docker rm -f "$container" 2>/dev/null || true
126+
fi
127+
done
128+
129+
sleep 3
130+
fi
131+
132+
retry=$((retry + 1))
133+
if [ $retry -le $max_retries ]; then
134+
sleep 5
135+
fi
136+
done
137+
138+
echo -e "${RED}Failed to create cluster after $max_retries attempts${NC}"
139+
140+
# Clean up temporary config file if it exists
141+
if [ -f "/tmp/kind-config-${CLUSTER_NAME}.yaml" ]; then
142+
rm -f "/tmp/kind-config-${CLUSTER_NAME}.yaml"
143+
fi
144+
145+
return 1
146+
}
147+
45148
if kind get clusters | grep -q "^${CLUSTER_NAME}$"; then
46149
echo -e "${YELLOW}Test cluster ${CLUSTER_NAME} already exists, checking context...${NC}"
47150
# Check if the context exists, if not recreate it
48151
if ! kubectl config get-contexts -o name | grep -q "kind-${CLUSTER_NAME}"; then
49152
echo -e "${YELLOW}Context for ${CLUSTER_NAME} missing, recreating...${NC}"
50153
kind delete cluster --name "$CLUSTER_NAME"
51-
echo -e "${YELLOW}Creating KIND cluster: ${CLUSTER_NAME}${NC}"
52-
if [ -f "$CONFIG_FILE" ]; then
53-
kind create cluster --name "$CLUSTER_NAME" --config "$CONFIG_FILE" --wait 300s
54-
else
55-
echo -e "${YELLOW}Config file $CONFIG_FILE not found, creating cluster with default settings${NC}"
56-
kind create cluster --name "$CLUSTER_NAME" --wait 300s
57-
fi
154+
create_cluster
58155
else
59156
echo -e "${GREEN}Test cluster and context already exist, using existing setup${NC}"
60157
fi
61158
else
62-
echo -e "${YELLOW}Creating KIND cluster: ${CLUSTER_NAME}${NC}"
63-
if [ -f "$CONFIG_FILE" ]; then
64-
kind create cluster --name "$CLUSTER_NAME" --config "$CONFIG_FILE" --wait 300s
65-
else
66-
echo -e "${YELLOW}Config file $CONFIG_FILE not found, creating cluster with default settings${NC}"
67-
kind create cluster --name "$CLUSTER_NAME" --wait 300s
68-
fi
159+
create_cluster
69160
fi
70161

71162
# Set kubectl context to our test cluster
@@ -162,6 +253,11 @@ test_service_via_kubectl "Harbor" "harbor" "mock-harbor" "/api/v2.0/health"
162253
test_service_via_kubectl "Keycloak" "keycloak" "mock-keycloak" "/health"
163254
test_service_via_kubectl "Catalog" "orch-app" "mock-catalog" "/health"
164255

256+
# Clean up temporary config file if it exists
257+
if [ -f "/tmp/kind-config-${CLUSTER_NAME}.yaml" ]; then
258+
rm -f "/tmp/kind-config-${CLUSTER_NAME}.yaml"
259+
fi
260+
165261
echo -e "${GREEN}Component test environment setup complete!${NC}"
166262
echo -e "${GREEN}Services are deployed and accessible via kubectl port-forward${NC}"
167263
echo -e " Harbor: kubectl port-forward -n harbor svc/mock-harbor 8080:80"

0 commit comments

Comments (0)