
Commit af28807

refactoring and cleanups on architecture and minikube handling
1 parent aa9c64a commit af28807

15 files changed: +104 -182 lines changed


.evergreen-functions.yml

Lines changed: 10 additions & 3 deletions
@@ -231,7 +231,7 @@ functions:
      working_dir: src/github.com/mongodb/mongodb-kubernetes
      add_to_path:
        - ${workdir}/bin
-      binary: scripts/dev/configure_docker_auth.sh
+      binary: scripts/dev/configure_container_auth.sh

  setup_evg_host: &setup_evg_host
    command: subprocess.exec

@@ -280,10 +280,17 @@
      command: scripts/evergreen/setup_minikube_host.sh

  prune_docker_resources:
-    - command: subprocess.exec
+    - command: shell.exec
      type: setup
      params:
-        command: "docker system prune -a -f"
+        shell: bash
+        script: |
+          if command -v docker >/dev/null 2>&1; then
+            echo "Docker found, pruning docker resources..."
+            docker system prune -a -f
+          else
+            echo "Docker not found, skipping docker resource pruning"
+          fi

  # the task configures the set of tools necessary for any task working with K8 cluster:
  # installs kubectl, jq, kind (if necessary), configures docker authentication

.evergreen.yml

Lines changed: 1 addition & 1 deletion
@@ -140,7 +140,7 @@ variables:

  - &teardown_group
    teardown_group:
-      - func: teardown_kubernetes_environment
+      - func: prune_docker_resources
      - func: run_retry_script

  - &base_om7_dependency

Makefile

Lines changed: 1 addition & 1 deletion
@@ -147,7 +147,7 @@ ac:
# in parallel and both call 'aws_login') then Docker login may return an error "Error saving credentials:..The
# specified item already exists in the keychain". Seems this allows to ignore the error
aws_login:
-	@ scripts/dev/configure_docker_auth.sh
+	@ scripts/dev/configure_container_auth.sh

# cleans up aws resources, including s3 buckets which are older than 5 hours
aws_cleanup:

docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh

Lines changed: 22 additions & 9 deletions
@@ -91,15 +91,28 @@ download_agent() {
    AGENT_VERSION="${MDB_AGENT_VERSION}"
  fi

-  if [ "$(arch)" = "x86_64" ]; then
-    AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz"
-  elif [ "$(arch)" = "arm64" ]; then
-    AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.amzn2_aarch64.tar.gz"
-  elif [ "$(arch)" = "ppc64le" ]; then
-    AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel8_ppc64le.tar.gz"
-  elif [ "$(arch)" = "s390x" ]; then
-    AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel7_s390x.tar.gz"
-  fi
+  # Detect architecture for agent download
+  local detected_arch
+  detected_arch=$(uname -m)
+
+  case "${detected_arch}" in
+    x86_64)
+      AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz"
+      ;;
+    aarch64|arm64)
+      AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.amzn2_aarch64.tar.gz"
+      ;;
+    ppc64le)
+      AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel8_ppc64le.tar.gz"
+      ;;
+    s390x)
+      AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel7_s390x.tar.gz"
+      ;;
+    *)
+      script_log "Error: Unsupported architecture for MongoDB agent: ${detected_arch}"
+      exit 1
+      ;;
+  esac

  script_log "Downloading Agent version: ${AGENT_VERSION}"
  script_log "Downloading a Mongodb Agent from ${base_url:?}"
Lines changed: 4 additions & 45 deletions
@@ -8,11 +8,9 @@ source scripts/funcs/checks
source scripts/funcs/printing
source scripts/funcs/kubernetes

-# Parse command line arguments
CONTAINER_RUNTIME="${CONTAINER_RUNTIME-"docker"}"

-# Validate and set up container runtime configuration
-setup_container_runtime() {
+setup_validate_container_runtime() {
  case "${CONTAINER_RUNTIME}" in
    "podman")
      if ! command -v podman &> /dev/null; then

@@ -38,7 +36,6 @@ setup_container_runtime() {
      ;;
  esac

-  # Create config directory
  if [[ "$USE_SUDO" == "true" ]]; then
    sudo mkdir -p "$(dirname "${CONFIG_PATH}")"
  else

@@ -76,51 +73,20 @@ write_file() {
  fi
}

-check_docker_daemon_is_running() {
-  if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then
-    # Podman doesn't require a daemon
-    echo "Using Podman (no daemon required)"
-    return 0
-  fi
-
-  if [[ "$(uname -s)" != "Linux" ]]; then
-    echo "Skipping docker daemon check when not running in Linux"
-    return 0
-  fi
-
-  if systemctl is-active --quiet docker; then
-    echo "Docker is already running."
-  else
-    echo "Docker is not running. Starting Docker..."
-    # Start the Docker daemon
-    sudo systemctl start docker
-    for _ in {1..15}; do
-      if systemctl is-active --quiet docker; then
-        echo "Docker started successfully."
-        return 0
-      fi
-      echo "Waiting for Docker to start..."
-      sleep 3
-    done
-  fi
-}
-
remove_element() {
  local config_option="$1"
-  local tmpfile=$(mktemp)
+  local tmpfile
+  tmpfile=$(mktemp)

-  # Initialize config file if it doesn't exist
  if [[ ! -f "${CONFIG_PATH}" ]]; then
    write_file '{}' "${CONFIG_PATH}"
  fi

-  # Remove the specified element using jq
  exec_cmd jq 'del(.'"${config_option}"')' "${CONFIG_PATH}" > "${tmpfile}"
  exec_cmd cp "${tmpfile}" "${CONFIG_PATH}"
  rm "${tmpfile}"
}

-# Container runtime login wrapper
container_login() {
  local username="$1"
  local registry="$2"

@@ -132,21 +98,14 @@ container_login() {
  fi
}

-# This is the script which performs container authentication to different registries that we use (so far ECR and RedHat)
-# As the result of this login the config file will have all the 'auth' information necessary to work with container registries
-
-setup_container_runtime
-
-check_docker_daemon_is_running
+setup_validate_container_runtime

-# Initialize config file if it doesn't exist
if [[ ! -f "${CONFIG_PATH}" ]]; then
  write_file '{}' "${CONFIG_PATH}"
fi

if [[ -f "${CONFIG_PATH}" ]]; then
  if [[ "${RUNNING_IN_EVG:-"false"}" != "true" ]]; then
-    # Check if login is actually required by making a HEAD request to ECR using existing credentials
    echo "Checking if container registry credentials are valid..."
    ecr_auth=$(exec_cmd jq -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}")
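
The renamed script keeps docker as the default runtime and only switches when CONTAINER_RUNTIME is set; a minimal usage sketch based on the variables above (the podman value comes from the case statement, everything else is illustrative):

# default: authenticate with docker (CONTAINER_RUNTIME falls back to "docker")
scripts/dev/configure_container_auth.sh

# illustrative: the same script on a podman-only host
CONTAINER_RUNTIME=podman scripts/dev/configure_container_auth.sh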

scripts/dev/prepare_local_e2e_run.sh

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ ensure_namespace "${NAMESPACE}" 2>&1 | prepend "ensure_namespace"

echo "Deleting ~/.docker/.config.json and re-creating it"
rm ~/.docker/config.json || true
-scripts/dev/configure_docker_auth.sh 2>&1 | prepend "configure_docker_auth"
+scripts/dev/configure_container_auth.sh 2>&1 | prepend "configure_docker_auth"

echo "Configuring operator"
scripts/evergreen/e2e/configure_operator.sh 2>&1 | prepend "configure_operator"

scripts/evergreen/setup_aws.sh

Lines changed: 5 additions & 27 deletions
@@ -2,34 +2,16 @@
set -Eeou pipefail

source scripts/dev/set_env_context.sh
+source scripts/funcs/install

-# Detect system architecture
-detect_architecture() {
-  local arch
-  arch=$(uname -m)
-  echo "Detected architecture: ${arch}" >&2
-  echo "${arch}"
-}
-
-# Install AWS CLI v2 via binary download (for x86_64 and aarch64)
+# Install AWS CLI v2 via binary download (for amd64 and arm64)
install_aws_cli_binary() {
  local arch="$1"
  echo "Installing AWS CLI v2 via binary download for ${arch}..."

  # Map architecture names for AWS CLI download URLs
  local aws_arch
-  case "${arch}" in
-    x86_64)
-      aws_arch="x86_64"
-      ;;
-    aarch64|arm64)
-      aws_arch="aarch64"
-      ;;
-    *)
-      echo "Error: Unsupported architecture for binary installation: ${arch}" >&2
-      return 1
-      ;;
-  esac
+  aws_arch=$(uname -m)

  # Download and install AWS CLI v2
  local temp_dir

@@ -38,7 +20,7 @@ install_aws_cli_binary() {

  echo "Downloading AWS CLI v2 for ${aws_arch}..."
  curl -s "https://awscli.amazonaws.com/awscli-exe-linux-${aws_arch}.zip" -o "awscliv2.zip"
-
+
  unzip -q awscliv2.zip
  sudo ./aws/install --update

@@ -97,11 +79,7 @@ install_aws_cli() {
  arch=$(detect_architecture)

  case "${arch}" in
-    ppc64le|s390x)
-      echo "IBM architecture detected (${arch}). Using pip installation..."
-      install_aws_cli_pip
-      ;;
-    x86_64|aarch64|arm64)
+    amd64|arm64)
      echo "Standard architecture detected (${arch}). Using binary installation..."
      install_aws_cli_binary "${arch}"
      ;;
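
The per-architecture mapping could be dropped here because AWS publishes the CLI v2 archives under the raw uname -m names, so the download URL can be built from it directly; for example:

https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip     # x86_64 hosts
https://awscli.amazonaws.com/awscli-exe-linux-aarch64.zip    # aarch64 hosts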

scripts/evergreen/setup_jq.sh

Lines changed: 2 additions & 27 deletions
@@ -9,32 +9,7 @@ set -Eeou pipefail

source scripts/funcs/install

-# Detect and map architecture for jq releases
-detect_jq_architecture() {
-  local arch
-  arch=$(uname -m)
-
-  case "${arch}" in
-    x86_64)
-      echo "amd64"
-      ;;
-    aarch64|arm64)
-      echo "arm64"
-      ;;
-    ppc64le)
-      echo "ppc64el" # jq uses ppc64el instead of ppc64le
-      ;;
-    s390x)
-      echo "s390x"
-      ;;
-    *)
-      echo "Error: Unsupported architecture for jq: ${arch}" >&2
-      exit 1
-      ;;
-  esac
-}
-
-jq_arch=$(detect_jq_architecture)
-echo "Detected architecture: $(uname -m), using jq architecture: ${jq_arch}"
+jq_arch=$(detect_architecture "jq")
+echo "Detected architecture: ${jq_arch}"

download_and_install_binary "${PROJECT_DIR:-${workdir}}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${jq_arch}"

scripts/evergreen/setup_kind.sh

Lines changed: 2 additions & 6 deletions
@@ -2,16 +2,12 @@
set -Eeou pipefail

source scripts/dev/set_env_context.sh
+source scripts/funcs/install

# Store the lowercase name of Operating System
os=$(uname | tr '[:upper:]' '[:lower:]')
# Detect architecture
-arch=$(uname -m)
-case ${arch} in
-  x86_64) arch_suffix="amd64" ;;
-  aarch64|arm64) arch_suffix="arm64" ;;
-  *) echo "Unsupported architecture: ${arch}" >&2; exit 1 ;;
-esac
+arch_suffix=$(detect_architecture)
# This should be changed when needed
latest_version="v0.27.0"
scripts/evergreen/setup_kubectl.sh

Lines changed: 1 addition & 26 deletions
@@ -2,32 +2,7 @@
set -Eeou pipefail

source scripts/dev/set_env_context.sh
-
-# Detect system architecture and map to kubectl/helm architecture names
-detect_architecture() {
-  local arch
-  arch=$(uname -m)
-
-  case "${arch}" in
-    x86_64)
-      echo "amd64"
-      ;;
-    aarch64|arm64)
-      echo "arm64"
-      ;;
-    ppc64le)
-      echo "ppc64le"
-      ;;
-    s390x)
-      echo "s390x"
-      ;;
-    *)
-      echo "Unsupported architecture: ${arch}" >&2
-      echo "Supported architectures: x86_64 (amd64), aarch64 (arm64), ppc64le, s390x" >&2
-      exit 1
-      ;;
-  esac
-}
+source scripts/funcs/install

# Detect the current architecture
ARCH=$(detect_architecture)
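
The shared detect_architecture helper in scripts/funcs/install is not shown in this commit; a plausible sketch, reconstructed from the per-script mappings it replaces (amd64/arm64 for the AWS CLI, kind, and kubectl scripts, plus jq's ppc64el spelling when called as detect_architecture "jq"). The exact name handling and behavior are assumptions:

# Hypothetical reconstruction of detect_architecture from scripts/funcs/install.
# Maps uname -m to the artifact suffix; an optional tool argument ("jq")
# switches to jq's ppc64el spelling for ppc64le hosts.
detect_architecture() {
  local tool="${1:-}"
  local arch
  arch=$(uname -m)
  case "${arch}" in
    x86_64) echo "amd64" ;;
    aarch64|arm64) echo "arm64" ;;
    ppc64le)
      if [[ "${tool}" == "jq" ]]; then echo "ppc64el"; else echo "ppc64le"; fi
      ;;
    s390x) echo "s390x" ;;
    *)
      echo "Unsupported architecture: ${arch}" >&2
      exit 1
      ;;
  esac
}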
