diff --git a/pkg/controller/as3Handler_test.go b/pkg/controller/as3Handler_test.go
index 6e282aed0..3133efbc7 100644
--- a/pkg/controller/as3Handler_test.go
+++ b/pkg/controller/as3Handler_test.go
@@ -6,10 +6,11 @@ import (
     "net/http"
     "strings"
+    "sync"
+
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
     "k8s.io/apimachinery/pkg/util/intstr"
-    "sync"
 )
 
 var _ = Describe("AS3Handler Tests", func() {
@@ -853,5 +854,4 @@ var _ = Describe("AS3Handler Tests", func() {
             Expect(config.tenantResponseMap["test"].agentResponseCode).To(Equal(http.StatusUnprocessableEntity), "Response code should be 422")
         })
     })
-
 })
diff --git a/pkg/controller/clusterHandler.go b/pkg/controller/clusterHandler.go
index b14998d88..0af4c8333 100644
--- a/pkg/controller/clusterHandler.go
+++ b/pkg/controller/clusterHandler.go
@@ -59,12 +59,12 @@ func (ch *ClusterHandler) addClusterConfig(clusterName string, config *ClusterCo
     ch.Unlock()
 }
 
-// deleteClusterConfig removes a cluster configuration from the ClusterHandler.
-func (ch *ClusterHandler) deleteClusterConfig(clusterName string) {
-    ch.Lock()
-    delete(ch.ClusterConfigs, clusterName)
-    ch.Unlock()
-}
+//// deleteClusterConfig removes a cluster configuration from the ClusterHandler.
+//func (ch *ClusterHandler) deleteClusterConfig(clusterName string) {
+//    ch.Lock()
+//    delete(ch.ClusterConfigs, clusterName)
+//    ch.Unlock()
+//}
 
 // getClusterConfig returns the cluster configuration for the specified cluster.
 func (ch *ClusterHandler) getClusterConfig(clusterName string) *ClusterConfig {
@@ -96,37 +96,37 @@ func (ch *ClusterHandler) getInformerStore(clusterName string) *InformerStore {
 }
 
 // enqueueEvent adds an event to the eventQueue after checking for uniqueness.
-func (ch *ClusterHandler) enqueueEvent(clusterName string, obj interface{}) {
-    key := fmt.Sprintf("%s/%s", clusterName, "")
-    if _, exists := ch.uniqueAppIdentifier[key]; exists {
-        fmt.Printf("Duplicate event discarded: %s\n", key)
-        return
-    }
-    ch.uniqueAppIdentifier[key] = struct{}{}
-    ch.eventQueue.Add(obj)
-    fmt.Printf("Event queued: %s\n", key)
-}
-
-// ProcessEvents processes events from the eventQueue, applying deduplication and passing unique events to the controller.
-func (ch *ClusterHandler) ProcessEvents() {
-    for {
-        obj, shutdown := ch.eventQueue.Get()
-        if shutdown {
-            break
-        }
-
-        // Process event
-        ch.processEvent(obj)
-        ch.eventQueue.Done(obj)
-    }
-}
+//func (ch *ClusterHandler) enqueueEvent(clusterName string, obj interface{}) {
+//    key := fmt.Sprintf("%s/%s", clusterName, "")
+//    if _, exists := ch.uniqueAppIdentifier[key]; exists {
+//        fmt.Printf("Duplicate event discarded: %s\n", key)
+//        return
+//    }
+//    ch.uniqueAppIdentifier[key] = struct{}{}
+//    ch.eventQueue.Add(obj)
+//    fmt.Printf("Event queued: %s\n", key)
+//}
+
+//// ProcessEvents processes events from the eventQueue, applying deduplication and passing unique events to the controller.
+//func (ch *ClusterHandler) ProcessEvents() {
+//    for {
+//        obj, shutdown := ch.eventQueue.Get()
+//        if shutdown {
+//            break
+//        }
+//
+//        // Process event
+//        ch.processEvent(obj)
+//        ch.eventQueue.Done(obj)
+//    }
+//}
 
 // processEvent handles individual events, simulating sending to a controller.
-func (ch *ClusterHandler) processEvent(obj interface{}) {
-    // Here you would handle the business logic for the event.
-    fmt.Printf("Processing event: %v\n", obj)
-    // Add actual controller handling logic here.
-}
+//func (ch *ClusterHandler) processEvent(obj interface{}) {
+//    // Here you would handle the business logic for the event.
+//    fmt.Printf("Processing event: %v\n", obj)
+//    // Add actual controller handling logic here.
+//}
 
 // remove any cluster which is not provided in externalClustersConfig or not part of the HA cluster
 func (ch *ClusterHandler) cleanClusterCache(primaryClusterName, secondaryClusterName string, activeClusters map[string]bool) {
@@ -262,17 +262,17 @@ func (ch *ClusterHandler) getMonitoredNamespaces(clusterName string) map[string]
     return ns
 }
 
-func (ch *ClusterHandler) getClusterNames() map[string]struct{} {
-    ch.RLock()
-    defer ch.RUnlock()
-    clusterNames := make(map[string]struct{})
-    for clusterName, config := range ch.ClusterConfigs {
-        if config.clusterDetails.ServiceTypeLBDiscovery {
-            clusterNames[clusterName] = struct{}{}
-        }
-    }
-    return clusterNames
-}
+//func (ch *ClusterHandler) getClusterNames() map[string]struct{} {
+//    ch.RLock()
+//    defer ch.RUnlock()
+//    clusterNames := make(map[string]struct{})
+//    for clusterName, config := range ch.ClusterConfigs {
+//        if config.clusterDetails.ServiceTypeLBDiscovery {
+//            clusterNames[clusterName] = struct{}{}
+//        }
+//    }
+//    return clusterNames
+//}
 
 // ResourceStatusUpdater is a go routine that listens to the resourceStatusUpdateChan
 func (ch *ClusterHandler) ResourceStatusUpdater() {
diff --git a/pkg/controller/controller_suit_test.go b/pkg/controller/controller_suit_test.go
index 2b4b2ed40..e6ecc1583 100644
--- a/pkg/controller/controller_suit_test.go
+++ b/pkg/controller/controller_suit_test.go
@@ -6,13 +6,14 @@ import (
     "fmt"
     "github.com/f5devcentral/go-bigip"
     "io"
+    "net/http"
+    "strings"
+    "testing"
+
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime/schema"
     dynamicfake "k8s.io/client-go/dynamic/fake"
-    "net/http"
-    "strings"
-    "testing"
 
     cisapiv1 "github.com/F5Networks/k8s-bigip-ctlr/v2/config/apis/cis/v1"
     "github.com/F5Networks/k8s-bigip-ctlr/v2/pkg/test"
@@ -123,6 +124,8 @@ func newMockRequestHandler(writer writer.Writer) *RequestHandler {
         },
     }
     return &RequestHandler{
+        reqChan:  make(chan ResourceConfigRequest, 1),
+        respChan: make(chan *agentPostConfig, 1),
         PrimaryBigIPWorker: &Agent{
             APIHandler: &APIHandler{LTM: &LTMAPIHandler{
                 &BaseAPIHandler{
diff --git a/pkg/controller/eventNotifier.go b/pkg/controller/eventNotifier.go
index c75176e09..929a58685 100644
--- a/pkg/controller/eventNotifier.go
+++ b/pkg/controller/eventNotifier.go
@@ -80,26 +80,26 @@ func (en *EventNotifier) CreateNotifierForNamespace(
     return evNotifier
 }
 
-// Get the notifier for a namespace
-func (en *EventNotifier) GetNotifierForNamespace(
-    namespace string,
-) *NamespaceEventNotifier {
-
-    en.mutex.Lock()
-    defer en.mutex.Unlock()
-
-    evNotifier, found := en.notifierMap[namespace]
-    if !found {
-        return nil
-    }
-    return evNotifier
-}
-
-func (en *EventNotifier) DeleteNotifierForNamespace(namespace string) {
-    en.mutex.Lock()
-    defer en.mutex.Unlock()
-    delete(en.notifierMap, namespace)
-}
+//// Get the notifier for a namespace
+//func (en *EventNotifier) GetNotifierForNamespace(
+//    namespace string,
+//) *NamespaceEventNotifier {
+//
+//    en.mutex.Lock()
+//    defer en.mutex.Unlock()
+//
+//    evNotifier, found := en.notifierMap[namespace]
+//    if !found {
+//        return nil
+//    }
+//    return evNotifier
+//}
+//
+//func (en *EventNotifier) DeleteNotifierForNamespace(namespace string) {
+//    en.mutex.Lock()
+//    defer en.mutex.Unlock()
+//    delete(en.notifierMap, namespace)
+//}
 
 func (nen *NamespaceEventNotifier) RecordEvent(
     obj runtime.Object,
diff --git a/pkg/controller/gtmBackend_test.go b/pkg/controller/gtmBackend_test.go
index 8e6efcbc9..cf27f8463 100644
--- a/pkg/controller/gtmBackend_test.go
+++ b/pkg/controller/gtmBackend_test.go
@@ -50,6 +50,8 @@ var _ = Describe("Backend Tests", func() {
                 BaseAPIHandler: mockBaseAPIHandler,
             },
         }
+        agent.GTM.PostManager.firstPost = true
+        agent.GTM.PostManager.PostDelay = 30
         go agent.gtmWorker()
         agent.GTM.PostManager.postChan <- postConfig
         response := <-agent.GTM.PostManager.respChan
@@ -57,6 +59,8 @@ var _ = Describe("Backend Tests", func() {
 
         Expect(response).NotTo(BeNil(), "response should not be nil")
         Expect(response.tenantResponseMap["test_gtm"].agentResponseCode).To(Equal(http.StatusOK), "response code should be 200")
+        postConfig.incomingTenantDeclMap = make(map[string]as3Tenant)
+        agent.GTM.PostManager.postChan <- postConfig
         close(agent.LTM.PostManager.postChan)
         close(agent.LTM.PostManager.respChan)
     })
diff --git a/pkg/controller/multiClusterHealthProbeManager_test.go b/pkg/controller/multiClusterHealthProbeManager_test.go
index 84125b5dc..53a87a81e 100644
--- a/pkg/controller/multiClusterHealthProbeManager_test.go
+++ b/pkg/controller/multiClusterHealthProbeManager_test.go
@@ -1,6 +1,9 @@
 package controller
 
 import (
+    "crypto/tls"
+    "net/http"
+
     "github.com/F5Networks/k8s-bigip-ctlr/v2/pkg/teem"
     "github.com/F5Networks/k8s-bigip-ctlr/v2/pkg/test"
     . "github.com/onsi/ginkgo/v2"
@@ -10,7 +13,6 @@ import (
     "gopkg.in/yaml.v2"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     k8sfake "k8s.io/client-go/kubernetes/fake"
-    "net/http"
 )
 
 var _ = Describe("Multi Cluster Health Probe", func() {
@@ -124,7 +126,9 @@ extendedRouteSpec:
         Expect(mockCtlr.RequestHandler.PrimaryClusterHealthProbeParams.statusRunning).To(BeFalse(), "incorrect primary cluster health status")
         mockCtlr.getPrimaryClusterHealthStatus()
         Expect(mockCtlr.RequestHandler.PrimaryClusterHealthProbeParams.statusRunning).To(BeFalse(), "incorrect primary cluster health status")
-
+        mockCtlr.RequestHandler.PrimaryClusterHealthProbeParams.statusChanged = false
+        mockCtlr.getPrimaryClusterHealthStatus()
+        Expect(mockCtlr.RequestHandler.PrimaryClusterHealthProbeParams.statusRunning).To(BeFalse(), "incorrect primary cluster health status")
     })
     It("Check Primary Cluster HealthProbe with valid http endpoint", func() {
         server := ghttp.NewServer()
@@ -139,6 +143,30 @@ extendedRouteSpec:
         Expect(mockCtlr.RequestHandler.checkPrimaryClusterHealthStatus()).To(BeTrue(), "incorrect primary cluster health status")
         server.Close()
     })
+    It("Check Primary Cluster HealthProbe with valid https endpoint", func() {
+        http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
+            InsecureSkipVerify: true,
+        }
+        server := ghttp.NewTLSServer()
+        statusCode := 200
+        server.AppendHandlers(
+            ghttp.CombineHandlers(
+                ghttp.VerifyRequest("GET", "/"),
+                ghttp.RespondWithJSONEncoded(statusCode, ""),
+            ))
+        es.HAClusterConfig.PrimaryClusterEndPoint = "https://" + server.Addr()
+        mockCtlr.updateHealthProbeConfig(es.HAClusterConfig)
+        mockCtlr.RequestHandler.PrimaryClusterHealthProbeParams.EndPointType = "https"
+        Expect(mockCtlr.RequestHandler.checkPrimaryClusterHealthStatus()).To(BeTrue(), "incorrect primary cluster health status")
+        server.Close()
+    })
+    It("Check Primary Cluster HealthProbe with invalid https endpoint", func() {
+        Expect(mockCtlr.RequestHandler.getPrimaryClusterHealthStatusFromHTTPSEndPoint()).To(BeFalse())
+        mockCtlr.RequestHandler.PrimaryClusterHealthProbeParams.EndPoint = "http://0.0.0.0:80"
+        Expect(mockCtlr.RequestHandler.getPrimaryClusterHealthStatusFromHTTPSEndPoint()).To(BeFalse())
+        mockCtlr.RequestHandler.PrimaryClusterHealthProbeParams.EndPoint = "https://"
+        Expect(mockCtlr.RequestHandler.getPrimaryClusterHealthStatusFromHTTPSEndPoint()).To(BeFalse())
+    })
     It("Check Primary Cluster HealthProbe with invalid http endpoint", func() {
         Expect(mockCtlr.RequestHandler.getPrimaryClusterHealthStatusFromHTTPEndPoint()).To(BeFalse())
diff --git a/pkg/controller/node_poll_handler_test.go b/pkg/controller/node_poll_handler_test.go
index 9a0a3a444..28e7830ef 100644
--- a/pkg/controller/node_poll_handler_test.go
+++ b/pkg/controller/node_poll_handler_test.go
@@ -420,6 +420,7 @@ var _ = Describe("Node Poller Handler", func() {
             }
 
             mockCtlr.SetupNodeProcessing("")
+            mockCtlr.processBlockAffinities("")
             mockWriter, ok = mockCtlr.PrimaryBigIPWorker.ConfigWriter.(*test.MockWriter)
             Expect(ok).To(Equal(true))
             Expect(len(mockWriter.Sections)).To(Equal(1))
diff --git a/pkg/controller/requestHandler_test.go b/pkg/controller/requestHandler_test.go
new file mode 100644
index 000000000..4085bcc35
--- /dev/null
+++ b/pkg/controller/requestHandler_test.go
@@ -0,0 +1,299 @@
+package controller
+
+import (
+    "github.com/F5Networks/k8s-bigip-ctlr/v2/pkg/test"
+    . "github.com/onsi/ginkgo/v2"
+    "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+var _ = Describe("Request Handler Tests", func() {
+    var mockRequestHandler *RequestHandler
+    var mockBaseAPIHandler *BaseAPIHandler
+    var agent *Agent
+    BeforeEach(func() {
+        mockWriter := &test.MockWriter{FailStyle: test.Success}
+        mockRequestHandler = newMockRequestHandler(mockWriter)
+        mockBaseAPIHandler = newMockBaseAPIHandler()
+        agent = &Agent{
+            StopChan: make(chan struct{}),
+            APIHandler: &APIHandler{
+                LTM: &LTMAPIHandler{
+                    BaseAPIHandler: mockBaseAPIHandler,
+                },
+                GTM: &GTMAPIHandler{
+                    BaseAPIHandler: mockBaseAPIHandler,
+                },
+            },
+            EventChan: make(chan interface{}),
+        }
+    })
+    It("Test Request handler routine", func() {
+        mem1 := PoolMember{
+            Address: "1.2.3.5",
+            Port:    8080,
+        }
+        mem2 := PoolMember{
+            Address: "1.2.3.6",
+            Port:    8081,
+        }
+        rsCfg := &ResourceConfig{}
+        rsCfg.MetaData.Active = true
+        rsCfg.MetaData.ResourceType = VirtualServer
+        rsCfg.Virtual.Name = "crd_vs_172.13.14.15"
+        rsCfg.Virtual.PoolName = "default_pool_svc1"
+        rsCfg.Virtual.Destination = "/test/172.13.14.5:8080"
+        rsCfg.Virtual.AllowVLANs = []string{"flannel_vxlan"}
+        rsCfg.Virtual.IpIntelligencePolicy = "/Common/ip-intelligence-policy"
+        rsCfg.Virtual.HTTPCompressionProfile = "/Common/compressionProfile"
+        rsCfg.Virtual.BigIPRouteDomain = 10
+        rsCfg.Virtual.AdditionalVirtualAddresses = []string{"172.13.14.17", "172.13.14.18"}
+        rsCfg.Virtual.ProfileAdapt = ProfileAdapt{"/Common/example-requestadapt", "/Common/example-responseadapt"}
+        rsCfg.Virtual.Policies = []nameRef{
+            {
+                Name:      "policy1",
+                Partition: "test",
+            },
+            {
+                Name:      "policy2",
+                Partition: "test",
+            },
+        }
+        rsCfg.Pools = Pools{
+            Pool{
+                Name:            "pool1",
+                MinimumMonitors: intstr.IntOrString{Type: 1, StrVal: "all"},
+                Members:         []PoolMember{mem1, mem2},
+                MonitorNames: []MonitorName{
+                    {Name: "/test/http_monitor"},
+                },
+            },
+        }
+        rsCfg.Virtual.IRules = []string{"none", "/common/irule1", "/common/irule_2", "/common/http_redirect_irule"}
+        rsCfg.IRulesMap = IRulesMap{
+            NameRef{"custom_iRule", DEFAULT_PARTITION}: &IRule{
+                Name:      "custom_iRule",
+                Partition: DEFAULT_PARTITION,
+                Code:      "tcl code blocks",
+            },
+            NameRef{HttpRedirectIRuleName, DEFAULT_PARTITION}: &IRule{
+                Name:      HttpRedirectIRuleName,
+                Partition: DEFAULT_PARTITION,
+                Code:      "tcl code blocks",
+            },
+        }
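+        // Populate an internal data group with a static record to exercise data-group handling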
NameRef{"static-internal-dg", "test"}: DataGroupNamespaceMap{ + "intDg1": &InternalDataGroup{ + Name: "static-internal-dg", + Partition: "test", + Type: "string", + Records: []InternalDataGroupRecord{ + { + Name: "apiTye", + Data: AS3, + }, + }, + }, + }, + } + rsCfg.Policies = Policies{ + Policy{ + Name: "policy1", + Strategy: "first-match", + Rules: Rules{ + &Rule{ + Conditions: []*condition{ + { + Values: []string{"test.com"}, + Equals: true, + }, + }, + Actions: []*action{ + { + Forward: true, + Request: true, + Redirect: true, + HTTPURI: true, + HTTPHost: true, + Pool: "default_svc_1", + }, + }, + }, + }, + }, + Policy{ + Name: "policy2", + Strategy: "first-match", + Rules: Rules{ + &Rule{ + Conditions: []*condition{ + { + Host: true, + Values: []string{"prod.com"}, + Equals: true, + HTTPHost: true, + Request: true, + }, + { + PathSegment: true, + Index: 1, + HTTPURI: true, + Equals: true, + Values: []string{"/foo"}, + Request: true, + }, + }, + Actions: []*action{ + { + Forward: true, + Request: true, + Redirect: true, + HTTPURI: true, + HTTPHost: true, + Pool: "default_svc_2", + Log: true, + Location: PrimaryBigIP, + Message: "log action", + Replace: true, + Value: "urihost", + WAF: true, + Policy: "/common/policy3", + Enabled: true, + Drop: true, + PersistMethod: SourceAddress, + }, + { + PersistMethod: DestinationAddress, + }, + { + PersistMethod: CookieHash, + }, + { + PersistMethod: CookieInsert, + }, + { + PersistMethod: CookieRewrite, + }, + { + PersistMethod: CookiePassive, + }, + { + PersistMethod: Universal, + }, + { + PersistMethod: Carp, + }, + { + PersistMethod: Hash, + }, + { + PersistMethod: Disable, + }, + { + PersistMethod: "Disable", + }, + }, + }, + }, + }, + Policy{ + Name: "policy3", + Strategy: "first-match", + Rules: Rules{ + &Rule{ + Conditions: []*condition{ + { + Path: true, + Name: "condition3", + Values: []string{"/common/test"}, + HTTPURI: true, + Equals: true, + Index: 3, + }, + { + Tcp: true, + Address: true, + Values: []string{"10.10.10.10"}, + Request: true, + }, + }, + Actions: []*action{ + { + Forward: true, + Request: true, + Redirect: true, + HTTPURI: true, + HTTPHost: true, + Pool: "default_svc_2", + }, + }, + }, + }, + }, + } + rsCfg.Monitors = Monitors{ + { + Name: "http_monitor", + Interval: 10, + Type: "http", + TargetPort: 8080, + Timeout: 10, + Send: "GET /health", + }, + { + Name: "https_monitor", + Interval: 10, + Type: "https", + TargetPort: 8443, + Timeout: 10, + Send: "GET /health", + }, + { + Name: "tcp_monitor", + Interval: 10, + Type: "tcp", + TargetPort: 3600, + Timeout: 10, + Send: "GET /health", + }, + } + + rsCfg.Virtual.Profiles = ProfileRefs{ + ProfileRef{ + Name: "serverssl", + Partition: "Common", + Context: "serverside", + }, + ProfileRef{ + Name: "serversslnew", + Context: "serverside", + }, + ProfileRef{ + Name: "clientssl", + Partition: "Common", + Context: "clientside", + }, + ProfileRef{ + Name: "clientsslnew", + Context: "clientside", + }, + } + zero := 0 + config := ResourceConfigRequest{ + ltmConfig: make(LTMConfig), + shareNodes: true, + gtmConfig: GTMConfig{}, + defaultRouteDomain: 1, + } + config.ltmConfig["default"] = &PartitionConfig{ResourceMap: make(ResourceMap), Priority: &zero} + config.ltmConfig["default"].ResourceMap["crd_vs_172.13.14.15"] = rsCfg + go mockRequestHandler.requestHandler() + mockRequestHandler.reqChan <- config + }) + AfterEach(func() { + close(agent.EventChan) + close(agent.StopChan) + close(mockRequestHandler.reqChan) + close(mockRequestHandler.respChan) + }) +}) diff --git 
+        go mockRequestHandler.requestHandler()
+        mockRequestHandler.reqChan <- config
+    })
+    AfterEach(func() {
+        close(agent.EventChan)
+        close(agent.StopChan)
+        close(mockRequestHandler.reqChan)
+        close(mockRequestHandler.respChan)
+    })
+})
diff --git a/pkg/controller/resourceConfig.go b/pkg/controller/resourceConfig.go
index 7becb1dd1..c10c10672 100644
--- a/pkg/controller/resourceConfig.go
+++ b/pkg/controller/resourceConfig.go
@@ -2791,22 +2791,22 @@ func (ctlr *Controller) prepareRSConfigFromIngressLink(
 
 }
 
-func (ctlr *Controller) isSinglePoolRatioEnabled(ts *cisapiv1.TransportServer) bool {
-    if ts.Spec.Pool.Weight != nil {
-        return true
-    }
-    for _, svc := range ts.Spec.Pool.AlternateBackends {
-        if svc.Weight != nil {
-            return true
-        }
-    }
-    for _, svc := range ts.Spec.Pool.MultiClusterServices {
-        if svc.Weight != nil {
-            return true
-        }
-    }
-    return false
-}
+//func (ctlr *Controller) isSinglePoolRatioEnabled(ts *cisapiv1.TransportServer) bool {
+//    if ts.Spec.Pool.Weight != nil {
+//        return true
+//    }
+//    for _, svc := range ts.Spec.Pool.AlternateBackends {
+//        if svc.Weight != nil {
+//            return true
+//        }
+//    }
+//    for _, svc := range ts.Spec.Pool.MultiClusterServices {
+//        if svc.Weight != nil {
+//            return true
+//        }
+//    }
+//    return false
+//}
 
 // Prepares resource config based on VirtualServer resource config
 func (ctlr *Controller) prepareRSConfigFromLBService(
diff --git a/pkg/controller/resourceConfig_test.go b/pkg/controller/resourceConfig_test.go
index 029c2c4d7..e87ee4a1a 100644
--- a/pkg/controller/resourceConfig_test.go
+++ b/pkg/controller/resourceConfig_test.go
@@ -1994,6 +1994,13 @@ var _ = Describe("Resource Config Tests", func() {
                 Reference:   ServiceRef,
                 Service:     "svc1",
                 ServicePort: intstr.IntOrString{IntVal: 80},
+                Monitors: []cisapiv1.Monitor{
+                    {
+                        Type:     "HTTP",
+                        Interval: 5,
+                        Timeout:  10,
+                    },
+                },
             }
             rsRef := resourceRef{
                 name: "test-vs",
diff --git a/pkg/controller/responseHandler_test.go b/pkg/controller/responseHandler_test.go
new file mode 100644
index 000000000..5139abc8e
--- /dev/null
+++ b/pkg/controller/responseHandler_test.go
@@ -0,0 +1,302 @@
+package controller
+
+import (
+    cisapiv1 "github.com/F5Networks/k8s-bigip-ctlr/v2/config/apis/cis/v1"
+    crdfake "github.com/F5Networks/k8s-bigip-ctlr/v2/config/client/clientset/versioned/fake"
+    cisinfv1 "github.com/F5Networks/k8s-bigip-ctlr/v2/config/client/informers/externalversions/cis/v1"
+    "github.com/F5Networks/k8s-bigip-ctlr/v2/pkg/teem"
+    "github.com/F5Networks/k8s-bigip-ctlr/v2/pkg/test"
+    . "github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + k8sfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +var _ = Describe("Response Handler Tests", func() { + var mockCtlr *mockController + var postConfig *agentPostConfig + var tenantResponseMap map[string]tenantResponse + var vs *cisapiv1.VirtualServer + var ts *cisapiv1.TransportServer + var il *cisapiv1.IngressLink + var mcVs *cisapiv1.VirtualServer + var mcTs *cisapiv1.TransportServer + var svc *v1.Service + namespace := "default" + + BeforeEach(func() { + vs = test.NewVirtualServer( + "SampleVS", + namespace, + cisapiv1.VirtualServerSpec{ + Host: "test.com", + VirtualServerAddress: "1.2.3.5", + Pools: []cisapiv1.VSPool{ + cisapiv1.VSPool{ + Path: "/path", + Service: "svc", + ServicePort: intstr.IntOrString{IntVal: 80}, + }, + }, + }) + mcVs = test.NewVirtualServer( + "SampleMCVS", + namespace, + cisapiv1.VirtualServerSpec{ + Host: "test.com", + VirtualServerAddress: "1.2.3.7", + Pools: []cisapiv1.VSPool{ + cisapiv1.VSPool{ + MultiClusterServices: []cisapiv1.MultiClusterServiceReference{ + { + SvcName: "svc", + Namespace: "default", + ServicePort: intstr.IntOrString{IntVal: 80}, + }, + }, + }, + }, + }) + ts = test.NewTransportServer( + "SampleTS", + namespace, + cisapiv1.TransportServerSpec{ + SNAT: "auto", + VirtualServerAddress: "1.2.3.6", + }, + ) + mcTs = test.NewTransportServer( + "SampleMCTS", + namespace, + cisapiv1.TransportServerSpec{ + SNAT: "auto", + Pool: cisapiv1.TSPool{ + MultiClusterServices: []cisapiv1.MultiClusterServiceReference{ + { + SvcName: "svc", + Namespace: "default", + ServicePort: intstr.IntOrString{IntVal: 80}, + }, + }, + }, + }) + label := make(map[string]string) + label["app"] = "ingresslink" + selector := &metav1.LabelSelector{ + MatchLabels: label, + } + iRules := []string{"dummyiRule"} + il = test.NewIngressLink( + "SampleIL", + namespace, + "1", + cisapiv1.IngressLinkSpec{ + VirtualServerAddress: "1.2.3.4", + Selector: selector, + IRules: iRules, + }, + ) + svc = test.NewService( + "svc", + "1", + namespace, + v1.ServiceTypeClusterIP, + []v1.ServicePort{ + { + Port: 80, + Name: "port0", + }, + }, + ) + tenantDeclMap := make(map[string]as3Tenant) + tenantResponseMap = make(map[string]tenantResponse) + tenantResponseMap["test"] = tenantResponse{} + tenantResponseMap["test1"] = tenantResponse{} + tenantDeclMap["test"] = as3Tenant{ + "class": "Tenant", + "defaultRouteDomain": 0, + as3SharedApplication: "shared", + "label": "cis2.x", + } + mockCtlr = newMockController() + mockCtlr.multiClusterHandler = NewClusterHandler("") + mockWriter := &test.MockWriter{ + FailStyle: test.Success, + Sections: make(map[string]interface{}), + } + mockCtlr.RequestHandler = newMockRequestHandler(mockWriter) + mockCtlr.RequestHandler.PrimaryBigIPWorker.disableARP = false + go mockCtlr.multiClusterHandler.ResourceEventWatcher() + // Handles the resource status updates + go mockCtlr.multiClusterHandler.ResourceStatusUpdater() + mockCtlr.Partition = "test" + mockCtlr.multiClusterHandler.ClusterConfigs[""] = newClusterConfig() + mockCtlr.multiClusterHandler.ClusterConfigs[""].kubeClient = k8sfake.NewSimpleClientset(svc) + mockCtlr.multiClusterHandler.ClusterConfigs[""].kubeCRClient = crdfake.NewSimpleClientset(vs) + mockCtlr.multiClusterHandler.ClusterConfigs[""].kubeCRClient = crdfake.NewSimpleClientset(ts) + mockCtlr.multiClusterHandler.ClusterConfigs[""].kubeCRClient = 
+        mockCtlr.mode = CustomResourceMode
+        mockCtlr.globalExtendedCMKey = "kube-system/global-cm"
+        mockCtlr.multiClusterHandler.ClusterConfigs[""].InformerStore = initInformerStore()
+        mockCtlr.multiClusterHandler.ClusterConfigs[""].nativeResourceSelector, _ = createLabelSelector(DefaultCustomResourceLabel)
+        mockCtlr.multiClusterHandler.customResourceSelector, _ = createLabelSelector(DefaultCustomResourceLabel)
+        _ = mockCtlr.addNamespacedInformers("default", false, "")
+        mockCtlr.resourceQueue = workqueue.NewNamedRateLimitingQueue(
+            workqueue.DefaultControllerRateLimiter(), "custom-resource-controller")
+        mockCtlr.TeemData = &teem.TeemsData{
+            ResourceType: teem.ResourceTypes{
+                VirtualServer: make(map[string]int),
+            },
+        }
+        mockCtlr.resources = NewResourceStore()
+        mockCtlr.multiClusterResources = newMultiClusterResourceStore()
+        mockCtlr.multiClusterHandler.ClusterConfigs[""].crInformers["default"].vsInformer = cisinfv1.NewFilteredVirtualServerInformer(
+            mockCtlr.multiClusterHandler.ClusterConfigs[""].kubeCRClient,
+            namespace,
+            0,
+            cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
+            func(options *metav1.ListOptions) {
+                options.LabelSelector = mockCtlr.multiClusterHandler.ClusterConfigs[""].nativeResourceSelector.String()
+            },
+        )
+        mockCtlr.multiClusterHandler.ClusterConfigs[""].crInformers["default"].ilInformer = cisinfv1.NewFilteredIngressLinkInformer(
+            mockCtlr.multiClusterHandler.ClusterConfigs[""].kubeCRClient,
+            namespace,
+            0,
+            cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
+            func(options *metav1.ListOptions) {
+                options.LabelSelector = mockCtlr.multiClusterHandler.ClusterConfigs[""].nativeResourceSelector.String()
+            },
+        )
+        mockCtlr.webhookServer = &mockWebHookServer{}
+        mockCtlr.ResourceStatusVSAddressMap = make(map[resourceRef]string)
+        mockCtlr.addVirtualServer(vs)
+        mockCtlr.addTransportServer(ts)
+        mockCtlr.addIngressLink(il)
+        mem1 := PoolMember{
+            Address: "1.2.3.5",
+            Port:    8080,
+        }
+        mem2 := PoolMember{
+            Address: "1.2.3.6",
+            Port:    8081,
+        }
+        rsCfg := &ResourceConfig{}
+        rsCfg.MetaData.Active = true
+        rsCfg.Pools = Pools{
+            Pool{
+                Name:    "pool1",
+                Members: []PoolMember{mem1, mem2},
+            },
+        }
+        rsCfg.Virtual.Name = formatCustomVirtualServerName("My_VS", 80)
+        ltmConfig := make(LTMConfig)
+        zero := 0
+        ltmConfig["default"] = &PartitionConfig{ResourceMap: make(ResourceMap), Priority: &zero}
+        ltmConfig["default"].ResourceMap[rsCfg.Virtual.Name] = rsCfg
+        monitors := []Monitor{
+            {
+                Name:     "pool1_monitor",
+                Interval: 10,
+                Timeout:  10,
+                Type:     "http",
+                Send:     "GET /health",
+            },
+        }
+        gtmConfig := GTMConfig{
+            DEFAULT_PARTITION: GTMPartitionConfig{
+                WideIPs: map[string]WideIP{
+                    "test.com": {
+                        DomainName: "test.com",
+                        RecordType: "A",
+                        LBMethod:   "round-robin",
+                        Pools: []GSLBPool{
+                            {
+                                Name:       "pool1",
+                                RecordType: "A",
+                                LBMethod:   "round-robin",
+                                Members:    []string{"vs1", "vs2"},
+                                Monitors:   monitors,
+                            },
+                        },
+                    },
+                },
+            },
+        }
+        rsConfigRequest := ResourceConfigRequest{
+            ltmConfig: ltmConfig,
+            gtmConfig: gtmConfig,
+        }
+        postConfig = &agentPostConfig{
+            reqMeta: requestMeta{
+                id: 1,
+            },
+            as3APIURL:             "https://127.0.0.1/mgmt/shared/appsvcs/declare",
+            data:                  `{"class": "AS3", "declaration": {"class": "ADC", "test": {"class": "Tenant", "testApp": {"class": "Application", "webcert":{"class": "Certificate", "certificate": "abc", "privateKey": "abc", "chainCA": "abc"}}}}}`,
"Certificate", "certificate": "abc", "privateKey": "abc", "chainCA": "abc"}}}}}`, + incomingTenantDeclMap: tenantDeclMap, + tenantResponseMap: make(map[string]tenantResponse), + agentKind: PrimaryBigIP, + rscConfigRequest: rsConfigRequest, + } + postConfig.reqMeta.partitionMap = make(map[string]map[string]string) + postConfig.reqMeta.partitionMap["test"] = make(map[string]string) + postConfig.reqMeta.partitionMap["test"]["default/SampleVS"] = VirtualServer + postConfig.reqMeta.partitionMap["test"]["default/SampleTS"] = TransportServer + postConfig.reqMeta.partitionMap["test"]["default/SampleIL"] = IngressLink + + }) + It("Resource Status update tests for VS, TS and IL", func() { + go mockCtlr.responseHandler() + mockCtlr.respChan <- postConfig + }) + It("Resource Status update tests for VS, TS and IL for failed tenants with CCCL GTM", func() { + postConfig.failedTenants = make(map[string]tenantResponse) + postConfig.timeout = 30 + mockCtlr.requestCounter = 1 + postConfig.failedTenants["test"] = tenantResponse{ + message: "failed", + agentResponseCode: 500, + } + go mockCtlr.responseHandler() + mockCtlr.respChan <- postConfig + }) + It("Resource Status update tests for VS, TS and IL for GTM Config", func() { + postConfig.failedTenants = make(map[string]tenantResponse) + mockCtlr.RequestHandler.PrimaryBigIPWorker.ccclGTMAgent = true + postConfig.timeout = 30 + mockCtlr.requestCounter = 1 + postConfig.failedTenants = make(map[string]tenantResponse) + // postConfig.failedTenants["test"] = tenantResponse{ + // message: "failed", + // agentResponseCode: 500, + // } + go mockCtlr.responseHandler() + mockCtlr.respChan <- postConfig + }) + It("Resource Status update tests for VS, TS and IL for multicluster Standalone mode discovery", func() { + mockCtlr.multiClusterMode = PrimaryCIS + mockCtlr.discoveryMode = StandAloneCIS + mockCtlr.addVirtualServer(mcVs) + mockCtlr.addTransportServer(mcTs) + postConfig.failedTenants = make(map[string]tenantResponse) + mockCtlr.RequestHandler.PrimaryBigIPWorker.ccclGTMAgent = false + postConfig.timeout = 30 + mockCtlr.requestCounter = 1 + postConfig.failedTenants = make(map[string]tenantResponse) + // postConfig.failedTenants["test"] = tenantResponse{ + // message: "failed", + // agentResponseCode: 500, + // } + go mockCtlr.responseHandler() + mockCtlr.respChan <- postConfig + }) + AfterEach(func() { + close(mockCtlr.respChan) + }) +}) diff --git a/pkg/controller/worker_test.go b/pkg/controller/worker_test.go index dc586204d..910f0ac2a 100644 --- a/pkg/controller/worker_test.go +++ b/pkg/controller/worker_test.go @@ -1132,6 +1132,92 @@ var _ = Describe("Worker Tests", func() { "Invalid Resource Config") }) + It("Processing IngressLink for multicluster default discovery mode", func() { + mockCtlr.multiClusterMode = PrimaryBigIP + mockCtlr.discoveryMode = DefaultMode + fooPorts := []v1.ServicePort{ + { + Port: 443, + Name: "port0", + }, + } + foo := test.NewService("foo", "1", namespace, v1.ServiceTypeClusterIP, fooPorts) + label1 := make(map[string]string) + label1["app"] = "ingresslink" + foo.ObjectMeta.Labels = label1 + var ( + selctor = &metav1.LabelSelector{ + MatchLabels: label1, + } + ) + var iRules []string + IngressLink1 := test.NewIngressLink("ingresslink1", namespace, "1", + cisapiv1.IngressLinkSpec{ + Host: "test.com", + VirtualServerAddress: "1.2.3.4", + Selector: selctor, + IRules: iRules, + MultiClusterServices: []cisapiv1.MultiClusterServiceReference{ + { + ClusterName: "", + SvcName: "foo", + Namespace: "default", + }, + }, + }) + _ = 
+            _ = mockCtlr.multiClusterHandler.ClusterConfigs[""].crInformers["default"].ilInformer.GetIndexer().Add(IngressLink1)
+            mockCtlr.TeemData = &teem.TeemsData{
+                ResourceType: teem.ResourceTypes{
+                    IngressLink: make(map[string]int),
+                },
+            }
+            _ = mockCtlr.multiClusterHandler.ClusterConfigs[""].comInformers["default"].svcInformer.GetIndexer().Add(foo)
+            err := mockCtlr.processIngressLink(IngressLink1, false)
+            Expect(err).To(BeNil(), "Failed to process IngressLink")
+        })
+        It("Processing IngressLink for multicluster active-active discovery mode", func() {
+            mockCtlr.multiClusterMode = PrimaryBigIP
+            mockCtlr.discoveryMode = Active
+            fooPorts := []v1.ServicePort{
+                {
+                    Port: 443,
+                    Name: "port0",
+                },
+            }
+            foo := test.NewService("foo", "1", namespace, v1.ServiceTypeClusterIP, fooPorts)
+            label1 := make(map[string]string)
+            label1["app"] = "ingresslink"
+            foo.ObjectMeta.Labels = label1
+            var (
+                selector = &metav1.LabelSelector{
+                    MatchLabels: label1,
+                }
+            )
+            var iRules []string
+            IngressLink1 := test.NewIngressLink("ingresslink1", namespace, "1",
+                cisapiv1.IngressLinkSpec{
+                    Host:                 "test.com",
+                    VirtualServerAddress: "1.2.3.4",
+                    Selector:             selector,
+                    IRules:               iRules,
+                    MultiClusterServices: []cisapiv1.MultiClusterServiceReference{
+                        {
+                            ClusterName: "",
+                            SvcName:     "foo",
+                            Namespace:   "default",
+                        },
+                    },
+                })
+            _ = mockCtlr.multiClusterHandler.ClusterConfigs[""].crInformers["default"].ilInformer.GetIndexer().Add(IngressLink1)
+            mockCtlr.TeemData = &teem.TeemsData{
+                ResourceType: teem.ResourceTypes{
+                    IngressLink: make(map[string]int),
+                },
+            }
+            _ = mockCtlr.multiClusterHandler.ClusterConfigs[""].comInformers["default"].svcInformer.GetIndexer().Add(foo)
+            err := mockCtlr.processIngressLink(IngressLink1, false)
+            Expect(err).To(BeNil(), "Failed to process IngressLink")
+        })
     })
 
     It("get node port", func() {