Skip to content

Commit

Permalink
Add Feature Service Type LoadBalancer with IPAM (F5Networks#1773)
Browse files Browse the repository at this point in the history
Signed-off-by: Subba Reddy Veeramreddy <[email protected]>
  • Loading branch information
subbuv26 authored Apr 8, 2021
1 parent 47a72cb commit ef8578a
Show file tree
Hide file tree
Showing 6 changed files with 299 additions and 53 deletions.
3 changes: 2 additions & 1 deletion docs/RELEASE-NOTES.rst
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,8 @@ Added Functionality
* Integrated the ingress link mode with CRD mode
* Added implicit Health Monitor for ingress link resource
* :issues:`1573` Added support for type UDP Transport Server CRD
* CIS supports IP address assignment to Transport Server CRD using `F5 IPAM Controller <https://github.com/F5Networks/f5-ipam-controller/releases>`_. Refer for `Examples <https://github.com/F5Networks/f5-ipam-controller/blob/main/README.md>`_.
* CIS supports IP address assignment to Transport Server CRD using `F5 IPAM Controller <https://github.com/F5Networks/f5-ipam-controller/releases>`_. Refer to `Examples <https://github.com/F5Networks/f5-ipam-controller/blob/main/README.md>`_ for usage.
* Added Service Type LoadBalancer support in CRD Mode

Bug Fixes
`````````
Expand Down
18 changes: 18 additions & 0 deletions docs/_static/config_examples/example-service-type-lb.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
annotations:
cis.f5.com/ipamLabel: test
labels:
app: svc-lb1
name: svc-lb1
namespace: default
spec:
ports:
- name: svc-lb1-80
port: 80
protocol: TCP
targetPort: 80
selector:
app: svc-lb1
type: LoadBalancer
2 changes: 2 additions & 0 deletions pkg/crmanager/crManager.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,8 @@ const (
// HTTP Events for LTM Policy
HTTPRequest = "HTTPRequest"
TLSClientHello = "TLSClientHello"

LBServiceIPAMLabelAnnotation = "cis.f5.com/ipamLabel"
)

// NewCRManager creates a new CRManager Instance.
Expand Down
120 changes: 76 additions & 44 deletions pkg/crmanager/informers.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ package crmanager

import (
"fmt"
"reflect"
"time"

ficV1 "github.com/F5Networks/f5-ipam-controller/pkg/ipamapis/apis/fic/v1"
Expand All @@ -30,6 +31,8 @@ import (
"k8s.io/client-go/tools/cache"
)

// K8SCoreServices enumerates well-known Kubernetes/OpenShift system service
// names that CIS must never process as tenant workloads; the enqueue handlers
// skip any Service or Endpoints object bearing one of these names.
var K8SCoreServices = [...]string{
	"kube-dns",
	"kube-scheduler",
	"kube-controller-manager",
	"docker-registry",
	"kubernetes",
	"registry-console",
	"router",
	"kubelet",
	"console",
	"alertmanager-main",
	"alertmanager-operated",
	"cluster-monitoring-operator",
	"grafana",
	"kube-state-metrics",
	"node-exporter",
	"prometheus-k8s",
	"prometheus-operated",
	"prometheus-operatorwebconsole",
}

// start the VirtualServer informer
func (crInfr *CRInformer) start() {
var cacheSyncs []cache.InformerSynced
Expand Down Expand Up @@ -204,7 +207,7 @@ func (crMgr *CRManager) addEventHandlers(crInf *CRInformer) {
if crInf.tlsInformer != nil {
crInf.tlsInformer.AddEventHandler(
&cache.ResourceEventHandlerFuncs{
// AddFunc: func(obj interface{}) { crMgr.enqueueTLSServer(obj) },
AddFunc: func(obj interface{}) { crMgr.enqueueTLSServer(obj) },
UpdateFunc: func(old, cur interface{}) { crMgr.enqueueTLSServer(cur) },
// DeleteFunc: func(obj interface{}) { crMgr.enqueueTLSServer(obj) },
},
Expand Down Expand Up @@ -243,23 +246,17 @@ func (crMgr *CRManager) addEventHandlers(crInf *CRInformer) {
if crInf.svcInformer != nil {
crInf.svcInformer.AddEventHandler(
&cache.ResourceEventHandlerFuncs{
// Ignore AddFunc for service as we dont bother about services until they are
// mapped to VirtualServer. Any new service added and mapped to a VirtualServer
// will be handled in the VirtualServer Informer AddFunc.
// AddFunc: func(obj interface{}) { crMgr.enqueueService(obj) },
UpdateFunc: func(obj, cur interface{}) { crMgr.enqueueService(cur) },
DeleteFunc: func(obj interface{}) { crMgr.enqueueService(obj) },
AddFunc: func(obj interface{}) { crMgr.enqueueService(obj) },
UpdateFunc: func(obj, cur interface{}) { crMgr.enqueueUpdatedService(obj, cur) },
DeleteFunc: func(obj interface{}) { crMgr.enqueueDeletedService(obj) },
},
)
}

if crInf.epsInformer != nil {
crInf.epsInformer.AddEventHandler(
&cache.ResourceEventHandlerFuncs{
// Ignore AddFunc for endpoint as we dont bother about endpoints until they are
// mapped to VirtualServer. Any new endpoint added and mapped to a Service
// will be handled in the Service Informer AddFunc.
// AddFunc: func(obj interface{}) { crMgr.enqueueEndpoints(obj) },
AddFunc: func(obj interface{}) { crMgr.enqueueEndpoints(obj) },
UpdateFunc: func(obj, cur interface{}) { crMgr.enqueueEndpoints(cur) },
DeleteFunc: func(obj interface{}) { crMgr.enqueueEndpoints(obj) },
},
Expand Down Expand Up @@ -292,18 +289,9 @@ func (crMgr *CRManager) enqueueUpdatedIPAM(oldObj, newObj interface{}) {
oldIpam := oldObj.(*ficV1.F5IPAM)
curIpam := newObj.(*ficV1.F5IPAM)

log.Infof("Enqueueing Old IPAM: %v", oldIpam)
// if oldIpam.Spec.HostSpecs != curIpam.Spec.HostSpecs {
// key := &rqKey{
// namespace: oldIpam.ObjectMeta.Namespace,
// kind: IPAM,
// rscName: oldIpam.ObjectMeta.Name,
// rsc: oldObj,
// rscDelete: true,
// }

// crMgr.rscQueue.Add(key)
// }
if reflect.DeepEqual(oldIpam.Status, curIpam.Status) {
return
}

log.Infof("Enqueueing Updated IPAM: %v", curIpam)
key := &rqKey{
Expand Down Expand Up @@ -578,48 +566,92 @@ func (crMgr *CRManager) enqueueDeletedExternalDNS(obj interface{}) {
}

func (crMgr *CRManager) enqueueService(obj interface{}) {
flag := true
svc := obj.(*corev1.Service)
// Ignore K8S Core Services
for _, svcName := range K8SCoreServices {
if svc.ObjectMeta.Name == svcName {
return
}
}
log.Debugf("Enqueueing Service: %v", svc)
ignoresvcList := []string{"kube-dns", "kube-scheduler", "kube-controller-manager", "docker-registry", "kubernetes", "registry-console", "router", "kubelet", "console", "alertmanager-main", "alertmanager-operated", "cluster-monitoring-operator", "grafana", "kube-state-metrics", "node-exporter", "prometheus-k8s", "prometheus-operated", "prometheus-operatorwebconsole"}
for _, svcName := range ignoresvcList {
key := &rqKey{
namespace: svc.ObjectMeta.Namespace,
kind: Service,
rscName: svc.ObjectMeta.Name,
rsc: obj,
}
crMgr.rscQueue.Add(key)
}

func (crMgr *CRManager) enqueueUpdatedService(obj, cur interface{}) {
svc := obj.(*corev1.Service)
curSvc := cur.(*corev1.Service)
// Ignore K8S Core Services
for _, svcName := range K8SCoreServices {
if svc.ObjectMeta.Name == svcName {
flag = false
break
return
}
}
if flag {

if (svc.Spec.Type != curSvc.Spec.Type && svc.Spec.Type == corev1.ServiceTypeLoadBalancer) ||
(svc.Annotations[LBServiceIPAMLabelAnnotation] != curSvc.Annotations[LBServiceIPAMLabelAnnotation]) {
log.Debugf("Enqueueing Old Service: %v", svc)
key := &rqKey{
namespace: svc.ObjectMeta.Namespace,
kind: Service,
rscName: svc.ObjectMeta.Name,
rsc: obj,
rscDelete: true,
}
crMgr.rscQueue.Add(key)
}

log.Debugf("Enqueueing Updated Service: %v", curSvc)
key := &rqKey{
namespace: curSvc.ObjectMeta.Namespace,
kind: Service,
rscName: curSvc.ObjectMeta.Name,
rsc: cur,
}
crMgr.rscQueue.Add(key)
}

// enqueueDeletedService queues a deleted Service for teardown processing,
// marking the key with rscDelete; Kubernetes/OpenShift core services are
// skipped entirely.
func (crMgr *CRManager) enqueueDeletedService(obj interface{}) {
	svc := obj.(*corev1.Service)
	// Ignore K8S Core Services
	for _, coreSvc := range K8SCoreServices {
		if coreSvc == svc.ObjectMeta.Name {
			return
		}
	}
	log.Debugf("Enqueueing Service: %v", svc)
	crMgr.rscQueue.Add(&rqKey{
		namespace: svc.ObjectMeta.Namespace,
		kind:      Service,
		rscName:   svc.ObjectMeta.Name,
		rsc:       obj,
		rscDelete: true,
	})
}

func (crMgr *CRManager) enqueueEndpoints(obj interface{}) {
flag := true
eps := obj.(*corev1.Endpoints)
log.Debugf("Enqueueing Endpoints: %v", eps)
ignoreeplist := []string{"kube-dns", "kube-scheduler", "kube-controller-manager", "docker-registry", "kubernetes", "registry-console", "router", "kubelet", "console", "alertmanager-main", "alertmanager-operated", "cluster-monitoring-operator", "grafana", "kube-state-metrics", "node-exporter", "prometheus-k8s", "prometheus-operated", "prometheus-operatorwebconsole"}
for _, epname := range ignoreeplist {
// Ignore K8S Core Services
for _, epname := range K8SCoreServices {
if eps.ObjectMeta.Name == epname {
flag = false
break
return
}
}
if flag {
key := &rqKey{
namespace: eps.ObjectMeta.Namespace,
kind: Endpoints,
rscName: eps.ObjectMeta.Name,
rsc: obj,
}

crMgr.rscQueue.Add(key)
log.Debugf("Enqueueing Endpoints: %v", eps)
key := &rqKey{
namespace: eps.ObjectMeta.Namespace,
kind: Endpoints,
rscName: eps.ObjectMeta.Name,
rsc: obj,
}

crMgr.rscQueue.Add(key)
}

func (nsInfr *NSInformer) start() {
Expand Down
30 changes: 28 additions & 2 deletions pkg/crmanager/resourceConfig.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

cisapiv1 "github.com/F5Networks/k8s-bigip-ctlr/config/apis/cis/v1"
v1 "github.com/F5Networks/k8s-bigip-ctlr/config/apis/cis/v1"
log "github.com/F5Networks/k8s-bigip-ctlr/pkg/vlogger"
v1 "k8s.io/api/core/v1"
)

// NewResources is Constructor for Resources
Expand Down Expand Up @@ -1059,7 +1059,7 @@ func (crMgr *CRManager) handleDataGroupIRules(
rsCfg *ResourceConfig,
virtualName string,
vsHost string,
tls *v1.TLSProfile,
tls *cisapiv1.TLSProfile,
) {
// For https
if nil != tls {
Expand Down Expand Up @@ -1149,6 +1149,32 @@ func (crMgr *CRManager) prepareRSConfigFromTransportServer(
return nil
}

// prepareRSConfigFromLBService prepares a resource config from a Service of
// type LoadBalancer: it builds a single pool targeting the service's first
// declared port and wires the virtual server to it with default SNAT in
// standard mode. Returns an error if the service declares no ports.
func (crMgr *CRManager) prepareRSConfigFromLBService(
	rsCfg *ResourceConfig,
	svc *v1.Service,
) error {

	// Guard: indexing Ports[0] on a port-less service would panic.
	if len(svc.Spec.Ports) == 0 {
		return fmt.Errorf("LoadBalancer service %s/%s has no ports defined",
			svc.Namespace, svc.Name)
	}

	// NOTE(review): only the first declared port is exposed — confirm whether
	// multi-port LoadBalancer services need additional pools.
	poolName := formatVirtualServerPoolName(
		svc.Namespace,
		svc.Name,
		svc.Spec.Ports[0].Port,
		"")
	pool := Pool{
		Name:            poolName,
		Partition:       rsCfg.Virtual.Partition,
		ServiceName:     svc.Name,
		ServicePort:     svc.Spec.Ports[0].Port,
		NodeMemberLabel: "",
	}
	rsCfg.Pools = Pools{pool}
	rsCfg.Virtual.PoolName = poolName
	rsCfg.Virtual.SNAT = DEFAULT_SNAT
	rsCfg.Virtual.Mode = "standard"

	return nil
}

// getRSCfgResName composes the fully-qualified resource name by joining the
// virtual server name and the resource name with an underscore.
func getRSCfgResName(rsVSName, resName string) string {
	return fmt.Sprint(rsVSName, "_", resName)
}
Loading

0 comments on commit ef8578a

Please sign in to comment.