This repository was archived by the owner on Nov 16, 2023. It is now read-only.

Commit 84a1d1d

Merge pull request #19 from sayar/support-ssl
Add config for TLS on the external LoadBalancer; refactor the external liveness test to support it.
2 parents 3a8d011 + f79fb3d commit 84a1d1d

File tree

7 files changed: +125, -33 lines


.gitignore

Lines changed: 1 addition & 0 deletions
@@ -1,4 +1,5 @@
 test/temp
 generated/
+components/
 helm_repos/
 .DS_Store

config/tlsloadbalancer.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+config:
+subcomponents:
+  kafka-cluster:
+    config:
+      externalLoadBalancerTls: true
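
This config only flips a single chart value. To preview its effect without going through the repo's own config tooling, the chart under helm-chart/kafka-cluster can be rendered directly; a minimal sketch, assuming a local Helm install and that helm template is acceptable for a dry run:

    # Render the kafka-cluster chart with the external listener's TLS toggle on and
    # show the loadbalancer listener block of the generated Strimzi Kafka resource.
    helm template ./helm-chart/kafka-cluster \
        --set externalLoadBalancerTls=true | grep -A 1 "type: loadbalancer"

With the toggle set, the line following type: loadbalancer should render as tls: true, matching the template change in the next file.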

helm-chart/kafka-cluster/templates/kafka-cluster.yaml

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ spec:
     listeners:
       external:
         type: loadbalancer
-        tls: false # by default true
+        tls: {{ .Values.externalLoadBalancerTls }}
       plain:
         networkPolicyPeers:
           - podSelector:
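
Once a cluster is running, the effective value can be read back from the Strimzi Kafka custom resource itself. A quick check sketch; it assumes the cluster is named kcluster in the kafka namespace, as the test scripts expect, and that the listener layout follows this template (spec.kafka.listeners.external):

    # Read the external listener's tls flag back from the live Kafka resource.
    kubectl get kafka kcluster -n kafka \
        -o jsonpath='{.spec.kafka.listeners.external.tls}'

This should print true when the tlsloadbalancer config above is applied, and false with the chart default below.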

helm-chart/kafka-cluster/values.yaml

Lines changed: 2 additions & 0 deletions
@@ -2,4 +2,6 @@
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.

+externalLoadBalancerTls: false
+
 kafkaLogDirs: /var/lib/kafka/data-0

test/README.md

Lines changed: 7 additions & 3 deletions
@@ -21,16 +21,20 @@ The test results will displayed at the end. The script with exit with code 0 for
 - `kubectl` should be configured to point to this cluster
 - The Kafka deployment should be configured with an external loadbalancer listener.
 - [Reference Guide](https://strimzi.io/2019/05/13/accessing-kafka-part-4.html)
-- Currently TLS is _not_ supported for this test
+- The External LoadBalancer can have TLS enabled or disabled.
 - `kafkacat` needs to be installed and added to $PATH
+- on systems with homebrew installed, you can run `brew install kafkacat`
 - [Kafkacat Installation](https://github.com/edenhill/kafkacat#install)

 ### Running the test

-Simply run `sh ./externaltest.sh`. The script will retrieve the broker's external address
+Run the testing script as configured below:
+- If the External LoadBalancer is enabled with TLS support: `sh ./externaltest.sh -t`
+- If the External LoadBalancer is NOT enabled with TLS support: `sh ./externaltest.sh`
+
+The script will deploy a test topic and connect to the brokers through the external loadbalancer IP, utilizing kafkacat as a producer and consumer.
 The test results will displayed at the end. The script with exit with code 0 for success and 1 for failure.

-
 # Replication

 A sample configuration for mirror maker is included with the repo. Upload the destination cluster's certificate into a secret called `mirrormaker-cluster-ca-cert` in the `kafka` namespace, and add the IP address of the destination cluster's broker into `mirror-maker.yaml`. You will also need to create a KafkaUser for the second cluster in order to authenticate Mirror Maker with your cluster. To test out Mirror Maker, run ./test/mirror-maker.sh to set up clients within the Kafka namespace authenticated with mutual TLS authentication. You can then test out replication by using the kafkaclient pods:
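
For the TLS case, the kafkacat.config file that externaltest.sh writes (see the script diff below) can also be used by hand to probe the listener before a full run. A sketch, with the per-run topic directory the script creates under temp/ substituted in:

    # List broker metadata over the external listener using the generated config file;
    # a clean listing confirms the bootstrap address and, in TLS mode, the handshake.
    kafkacat -L -F "temp/<topic-dir>/kafkacat.config"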

test/externaltest.sh

Lines changed: 105 additions & 24 deletions
@@ -1,11 +1,69 @@
 #! /usr/bin/env bash

+#### CONSTANTS --------------------------------
 YELLOW='\033[1;33m'
 RED='\033[1;31m'
 GREEN='\033[0;32m'
 NC='\033[0m' # No Color

-# TODO: Pass by argument
+#### FUNCTIONS --------------------------------
+create_and_deploy_kafka_test_topic_yaml()
+{
+    TESTING_TOPIC=$1
+    echo "apiVersion: kafka.strimzi.io/v1beta1
+kind: KafkaTopic
+metadata:
+  name: ${TESTING_TOPIC}
+  namespace: kafka
+  labels:
+    strimzi.io/cluster: kcluster
+spec:
+  partitions: 3
+  replicas: 2
+  config:
+    retention.ms: 7200000
+    segment.bytes: 1073741824" > temp/${TESTING_TOPIC}/kafka-test-topic.yaml
+
+    kubectl apply -f temp/${TESTING_TOPIC}/kafka-test-topic.yaml
+
+    sleep 2
+}
+
+create_and_deploy_kafka_test_user_yaml()
+{
+    TESTING_TOPIC=$1
+    echo "apiVersion: kafka.strimzi.io/v1alpha1
+kind: KafkaUser
+metadata:
+  name: ${TESTING_TOPIC}-user
+  namespace: kafka
+  labels:
+    strimzi.io/cluster: kcluster
+spec:
+  authentication:
+    type: tls
+  authorization:
+    type: simple
+    acls:
+      - resource:
+          type: topic
+          name: ${TESTING_TOPIC}
+          patternType: literal
+        operation: All" > temp/${TESTING_TOPIC}/kafka-test-user.yaml
+
+    kubectl apply -f temp/${TESTING_TOPIC}/kafka-test-user.yaml
+
+    sleep 2
+}
+
+cleanup_cluster()
+{
+    TESTING_TOPIC=$1
+    kubectl delete --recursive -f ./temp/${TESTING_TOPIC}
+}
+
+#### MAIN --------------------------------
+
 # Get Broker LoadBalancer Address
 BROKER_LB_IP=`kubectl get svc -n kafka kcluster-kafka-external-bootstrap --output jsonpath='{.status.loadBalancer.ingress[0].ip}'`
 echo $BROKER_LB_IP
@@ -23,50 +81,73 @@ echo $UUID
 TESTING_TOPIC="topic-${UUID}"
 echo "${YELLOW}Test Topic: ${TESTING_TOPIC}${NC}"

-# TODO: Deploy via CRD with kafka-topics.yaml
-# Deploy via kafka broker pod - Alternatively this can be done through the CRD.
-kubectl exec -n kafka -ti kcluster-kafka-0 --container kafka -- bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic $TESTING_TOPIC --partitions 3 --replication-factor 2
+# Create testing directory
+mkdir temp/${TESTING_TOPIC}
+
+# Deploy test topic
+create_and_deploy_kafka_test_topic_yaml $TESTING_TOPIC
+
+kubectl apply -f temp/${TESTING_TOPIC}/kafka-test-topic.yaml
+
+sleep 2
+
+# Create Kafkacat configuration based if TLS/SSL enforcement is enabled
+if [ $1 == "-t" ]; then
+    # TLS Enabled on cluster
+    echo "${YELLOW}Configuring Kafkacat with SSL${NC}"
+    # Deploy test user with access to test topic
+    create_and_deploy_kafka_test_user_yaml $TESTING_TOPIC
+
+    # Get test user credentials
+    echo `kubectl get secrets $TESTING_TOPIC-user -o jsonpath="{.data['user\.crt']}"` | base64 --decode > temp/${TESTING_TOPIC}/user.crt
+    echo `kubectl get secrets $TESTING_TOPIC-user -o jsonpath="{.data['user\.key']}"` | base64 --decode > temp/${TESTING_TOPIC}/user.key
+
+    # Get kafka cluster CA cert
+    echo `kubectl get secrets kcluster-cluster-ca-cert -o jsonpath="{.data['ca\.crt']}"` | base64 --decode > temp/${TESTING_TOPIC}/ca.crt
+
+    # Create kafkacat.config file
+    echo "bootstrap.servers=${BROKER_EXTERNAL_ADDRESS}
+security.protocol=ssl
+ssl.key.location=temp/${TESTING_TOPIC}/user.key
+ssl.certificate.location=temp/${TESTING_TOPIC}/user.crt
+ssl.ca.location=temp/${TESTING_TOPIC}/ca.crt" > temp/${TESTING_TOPIC}/kafkacat.config
+
+else
+    echo "${YELLOW}Configuring Kafkacat without SSL${NC}"
+    # TLS Disabled on cluster
+    # Create kafkacat.config file
+    echo "bootstrap.servers=${BROKER_EXTERNAL_ADDRESS}" > temp/${TESTING_TOPIC}/kafkacat.config
+fi

 # Create random test messages
-MESSAGE_INPUT_FILE="./temp/${TESTING_TOPIC}-input-messages.txt"
+MESSAGE_INPUT_FILE="./temp/${TESTING_TOPIC}/input-messages.txt"

 echo "Creating Input Message file."
 for i in {0..9}
 do
     MESSAGE=`uuidgen`
-    # echo "Message: ${MESSAGE}"
     echo "${MESSAGE}" >> $MESSAGE_INPUT_FILE
 done

 cat $MESSAGE_INPUT_FILE

 # Produce messages through Kafkacat - connecting through external LoadBalancer IP
-cat $MESSAGE_INPUT_FILE | kafkacat -P -b $BROKER_EXTERNAL_ADDRESS -t $TESTING_TOPIC
+cat $MESSAGE_INPUT_FILE | kafkacat -P -F temp/${TESTING_TOPIC}/kafkacat.config -t $TESTING_TOPIC

 # Consume messages through Kafkacat - connecting through external LoadBalancer IP
-MESSAGE_OUTPUT_FILE="./temp/${TESTING_TOPIC}-output-messages.txt"
-kafkacat -C -b $BROKER_EXTERNAL_ADDRESS -t $TESTING_TOPIC > $MESSAGE_OUTPUT_FILE &
+MESSAGE_OUTPUT_FILE="./temp/${TESTING_TOPIC}/output-messages.txt"
+kafkacat -C -F temp/${TESTING_TOPIC}/kafkacat.config -t $TESTING_TOPIC > $MESSAGE_OUTPUT_FILE &

-# TODO: verify this also kills the process on kafka client. We cannot remove the topic until the consumer is gone.
 CONSUMER_PID=$!
-sleep 10
+sleep 5
 kill $CONSUMER_PID

-echo "listing topics"
-kubectl exec -n kafka -ti kcluster-kafka-0 --container kafka -- bin/kafka-topics.sh --list --zookeeper localhost:2181
-
-# Delete test topic
-echo "deleting test topic"
-echo "kubectl exec -n kafka -ti kcluster-kafka-0 --container kafka -- bin/kafka-topics.sh --zookeeper localhost:2181 --delete --topic ${TESTING_TOPIC}"
-kubectl exec -n kafka -ti kcluster-kafka-0 --container kafka -- bin/kafka-topics.sh --zookeeper localhost:2181 --delete --topic $TESTING_TOPIC
-
-echo "listing topics after deletion"
-kubectl exec -n kafka -ti kcluster-kafka-0 --container kafka -- bin/kafka-topics.sh --list --zookeeper localhost:2181
+# Delete test topic and user
+cleanup_cluster $TESTING_TOPIC

-# TODO: Compare what was produced and what was consumed.
 # Compare contents of input and output
-SORTED_INPUT="./temp/sorted-input.txt"
-SORTED_OUTPUT="./temp/sorted-output.txt"
+SORTED_INPUT="./temp/${TESTING_TOPIC}/sorted-input.txt"
+SORTED_OUTPUT="./temp/${TESTING_TOPIC}/sorted-output.txt"
 sort $MESSAGE_INPUT_FILE > $SORTED_INPUT
 sort $MESSAGE_OUTPUT_FILE > $SORTED_OUTPUT
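
Two fragile spots in the new script are worth noting. Under bash, [ $1 == "-t" ] prints a unary-operator error when the script is run with no argument (it still falls through to the else branch, just noisily), and the kubectl get secrets calls pass no -n flag, so they rely on the current context defaulting to the kafka namespace where the KafkaUser lives. A minimal hardening sketch, not part of the commit:

    # Hypothetical hardening: quote the argument so a missing $1 falls through cleanly
    # to the plaintext branch, and name the namespace when reading the user secret.
    if [ "${1:-}" = "-t" ]; then
        kubectl get secret "${TESTING_TOPIC}-user" -n kafka \
            -o jsonpath="{.data['user\.crt']}" | base64 --decode > "temp/${TESTING_TOPIC}/user.crt"
    fi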

test/internaltest.sh

Lines changed: 4 additions & 5 deletions
@@ -26,13 +26,12 @@ echo "${YELLOW}Test Topic: ${TESTING_TOPIC}${NC}"
 kubectl exec -n kafka -ti kcluster-kafka-0 --container kafka -- bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic $TESTING_TOPIC --partitions 3 --replication-factor 2

 # Create random test messages
-MESSAGE_INPUT_FILE="./temp/${TESTING_TOPIC}-input-messages.txt"
+MESSAGE_INPUT_FILE="./temp/${TESTING_TOPIC}/input-messages.txt"

 echo "Creating Input Message file."
 for i in {0..9}
 do
     MESSAGE=`uuidgen`
-    # echo "Message: ${MESSAGE}"
     echo "${MESSAGE}" >> $MESSAGE_INPUT_FILE
 done

@@ -42,7 +41,7 @@ cat $MESSAGE_INPUT_FILE
 kubectl exec -n kafka -i kafkaclient-0 -- bin/kafka-console-producer.sh --broker-list kcluster-kafka-brokers:9092 --topic $TESTING_TOPIC < $MESSAGE_INPUT_FILE

 # Consume messages from topic
-MESSAGE_OUTPUT_FILE="./temp/${TESTING_TOPIC}-output-messages.txt"
+MESSAGE_OUTPUT_FILE="./temp/${TESTING_TOPIC}/output-messages.txt"
 kubectl exec -n kafka -i kafkaclient-0 -- bin/kafka-console-consumer.sh --bootstrap-server kcluster-kafka-bootstrap:9092 --topic $TESTING_TOPIC --from-beginning > $MESSAGE_OUTPUT_FILE &

 # TODO: verify this also kills the process on kafka client. We cannot remove the topic until the consumer is gone.
@@ -71,8 +70,8 @@ echo "listing topics after deletion"
 kubectl exec -n kafka -ti kcluster-kafka-0 --container kafka -- bin/kafka-topics.sh --list --zookeeper localhost:2181

 # Compare contents of input and output
-SORTED_INPUT="./temp/sorted-input.txt"
-SORTED_OUTPUT="./temp/sorted-output.txt"
+SORTED_INPUT="./temp/${TESTING_TOPIC}/sorted-input.txt"
+SORTED_OUTPUT="./temp/${TESTING_TOPIC}/sorted-output.txt"
 sort $MESSAGE_INPUT_FILE > $SORTED_INPUT
 sort $MESSAGE_OUTPUT_FILE > $SORTED_OUTPUT
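
internaltest.sh now writes its message and sorted files into a per-topic directory under temp/, but unlike externaltest.sh the hunks above do not show that directory being created. If the script does not already create it elsewhere, a guard like this would be needed before the first redirect; a sketch, not part of the commit:

    # Ensure the per-run temp directory exists before redirecting message files into it.
    mkdir -p "./temp/${TESTING_TOPIC}"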
