From 44486f88f242229f43d82509868ca45e3e252183 Mon Sep 17 00:00:00 2001 From: Carl Montanari Date: Sat, 21 Oct 2023 14:19:31 -0700 Subject: [PATCH 1/8] test: service conforms tests --- controllers/topology/service_test.go | 468 +++++++++++++++++++++++++++ 1 file changed, 468 insertions(+) create mode 100644 controllers/topology/service_test.go diff --git a/controllers/topology/service_test.go b/controllers/topology/service_test.go new file mode 100644 index 00000000..be45f0b6 --- /dev/null +++ b/controllers/topology/service_test.go @@ -0,0 +1,468 @@ +package topology_test + +import ( + "testing" + + "k8s.io/apimachinery/pkg/util/intstr" + + clabernetescontrollerstopology "github.com/srl-labs/clabernetes/controllers/topology" + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + k8scorev1 "k8s.io/api/core/v1" + apimachinerytypes "k8s.io/apimachinery/pkg/types" +) + +func TestServiceConforms(t *testing.T) { + cases := []struct { + name string + existing *k8scorev1.Service + rendered *k8scorev1.Service + ownerUID apimachinerytypes.UID + conforms bool + }{ + { + name: "simple", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + { + name: "conforms", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + "someextraannotations": "extraisok", + }, + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + "clabernetes/app": "clabernetes", + "someextralabel": "extraisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + { + Name: "ssh-for-reasons", + Protocol: "TCP", + Port: 22, + TargetPort: intstr.IntOrString{ + IntVal: 22, + }, + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + "clabernetes/app": "clabernetes", + }, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + { + Name: "ssh-for-reasons", + Protocol: "TCP", + Port: 22, + TargetPort: intstr.IntOrString{ + IntVal: 22, + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + { + name: "bad-selector", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8scorev1.ServiceSpec{ + Selector: map[string]string{ + "something": "something", + }, + }, + }, + rendered: &k8scorev1.Service{ + Spec: k8scorev1.ServiceSpec{ + Selector: map[string]string{ + "different": "different", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-type", + existing: &k8scorev1.Service{ + 
ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8scorev1.ServiceSpec{ + Type: "ClusterIP", + }, + }, + rendered: &k8scorev1.Service{ + Spec: k8scorev1.ServiceSpec{ + Type: "NodePort", + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-port-number", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 99, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "no-matching-port-name", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "something-else", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "port-target-mismatch", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 99, + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "missing-clabernetes-global-annotations", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelse": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-annotations-wrong-value", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: 
map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-annotations-ok", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelseentirely": "thisisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + { + name: "missing-clabernetes-labels", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelse": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "clabernetes/app": "clabernetes", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-labels-wrong-value", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "clabernetes/app": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "clabernetes/app": "clabernetes", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-labels-ok", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelseentirely": "thisisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + { + name: "bad-owner", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "multiple-owner", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + actual := clabernetescontrollerstopology.ServiceConforms( + testCase.existing, + testCase.rendered, + testCase.ownerUID, + ) + if actual != testCase.conforms { + clabernetestesthelper.FailOutput(t, testCase.existing, testCase.rendered) + } + }) + } +} From 05ea7202be31701078d5a774dd53a3727552124e Mon Sep 17 00:00:00 2001 From: Carl Montanari Date: Sat, 21 Oct 2023 15:19:43 -0700 Subject: [PATCH 2/8] test: basic deployment render test --- controllers/deployments.go | 4 + controllers/topology/deployment.go | 54 +- controllers/topology/deployment_test.go | 692 ++++++++++++++++++ controllers/topology/service_test.go 
| 8 +- .../deployment/render-deployment/simple.json | 96 +++ 5 files changed, 820 insertions(+), 34 deletions(-) create mode 100644 controllers/topology/deployment_test.go create mode 100755 controllers/topology/test-fixtures/golden/deployment/render-deployment/simple.json diff --git a/controllers/deployments.go b/controllers/deployments.go index 048a5782..03bd2751 100644 --- a/controllers/deployments.go +++ b/controllers/deployments.go @@ -36,6 +36,10 @@ func (r *ResolvedDeployments) CurrentDeploymentNames() []string { // ContainersEqual returns true if the existing container slice matches the rendered container slice // it ignores slice order. func ContainersEqual(existing, rendered []k8scorev1.Container) bool { + if len(existing) != len(rendered) { + return false + } + for existingIdx := range existing { var matched bool diff --git a/controllers/topology/deployment.go b/controllers/topology/deployment.go index b550ce43..d5c084bb 100644 --- a/controllers/topology/deployment.go +++ b/controllers/topology/deployment.go @@ -7,8 +7,6 @@ import ( "strings" "time" - clabernetesconfig "github.com/srl-labs/clabernetes/config" - clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" @@ -150,7 +148,7 @@ func (r *Reconciler) enforceDeployments( r.Log.Info("creating missing deployments") for _, nodeName := range deployments.Missing { - deployment := renderDeployment( + deployment := r.RenderDeployment( obj, clabernetesConfigs, nodeName, @@ -190,7 +188,7 @@ func (r *Reconciler) enforceDeployments( deployment.Name, ) - expectedDeployment := renderDeployment( + expectedDeployment := r.RenderDeployment( obj, clabernetesConfigs, nodeName, @@ -201,7 +199,7 @@ func (r *Reconciler) enforceDeployments( return err } - if !deploymentConforms(deployment, expectedDeployment, obj.GetUID()) { + if !DeploymentConforms(deployment, expectedDeployment, obj.GetUID()) { r.Log.Debugf( "comparing existing deployment '%s/%s' spec does not conform to desired state, "+ "updating", @@ -268,12 +266,14 @@ func (r *Reconciler) restartDeploymentForNode( return r.Client.Update(ctx, nodeDeployment) } -func renderDeployment( +// RenderDeployment renders a k8sappsv1.Deployment object based on the given clabernetes topology +// object and clabernetes config mapping (sub-topologies) for the given node name. 
+func (r *Reconciler) RenderDeployment( obj clabernetesapistopologyv1alpha1.TopologyCommonObject, clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, nodeName string, ) *k8sappsv1.Deployment { - globalAnnotations, globalLabels := clabernetesconfig.GetManager().GetAllMetadata() + globalAnnotations, globalLabels := r.ConfigManagerGetter().GetAllMetadata() name := obj.GetName() @@ -413,7 +413,7 @@ func renderDeployment( deployment = renderDeploymentAddInsecureRegistries(obj, deployment) - deployment = renderDeploymentAddResources(obj, clabernetesConfigs, nodeName, deployment) + deployment = r.renderDeploymentAddResources(obj, clabernetesConfigs, nodeName, deployment) return deployment } @@ -495,7 +495,7 @@ func renderDeploymentAddInsecureRegistries( return deployment } -func renderDeploymentAddResources( +func (r *Reconciler) renderDeploymentAddResources( obj clabernetesapistopologyv1alpha1.TopologyCommonObject, clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, nodeName string, @@ -517,7 +517,7 @@ func renderDeploymentAddResources( return deployment } - resources := clabernetesconfig.GetManager().GetResourcesForContainerlabKind( + resources := r.ConfigManagerGetter().GetResourcesForContainerlabKind( clabernetesConfigs[nodeName].Topology.GetNodeKindType(nodeName), ) @@ -528,7 +528,10 @@ func renderDeploymentAddResources( return deployment } -func deploymentConforms( +// DeploymentConforms asserts if a given deployment conforms with a rendered deployment -- this +// isn't checking if the services are exactly the same, just checking that the parts clabernetes +// cares about are the same. +func DeploymentConforms( existingDeployment, renderedDeployment *k8sappsv1.Deployment, expectedOwnerUID apimachinerytypes.UID, @@ -562,39 +565,30 @@ func deploymentConforms( return false } - // this and labels will probably be a future us problem -- maybe some mutating webhooks will be - // adding labels or annotations that will cause us to continually reconcile, that would be lame - // ... 
we'll cross that bridge when we get there :) - if !reflect.DeepEqual( - existingDeployment.Spec.Template.ObjectMeta.Annotations, - renderedDeployment.Spec.Template.ObjectMeta.Annotations, + if !clabernetescontrollers.AnnotationsOrLabelsConform( + existingDeployment.ObjectMeta.Annotations, + renderedDeployment.ObjectMeta.Annotations, ) { return false } - if !reflect.DeepEqual( - existingDeployment.Spec.Template.ObjectMeta.Labels, - renderedDeployment.Spec.Template.ObjectMeta.Labels, + if !clabernetescontrollers.AnnotationsOrLabelsConform( + existingDeployment.ObjectMeta.Labels, + renderedDeployment.ObjectMeta.Labels, ) { return false } - if existingDeployment.ObjectMeta.Annotations == nil && - renderedDeployment.ObjectMeta.Annotations != nil { - // obviously our annotations don't exist, so we need to enforce that - return false - } - if !clabernetescontrollers.AnnotationsOrLabelsConform( - existingDeployment.ObjectMeta.Annotations, - renderedDeployment.ObjectMeta.Annotations, + existingDeployment.Spec.Template.ObjectMeta.Annotations, + renderedDeployment.Spec.Template.ObjectMeta.Annotations, ) { return false } if !clabernetescontrollers.AnnotationsOrLabelsConform( - existingDeployment.ObjectMeta.Labels, - renderedDeployment.ObjectMeta.Labels, + existingDeployment.Spec.Template.ObjectMeta.Labels, + renderedDeployment.Spec.Template.ObjectMeta.Labels, ) { return false } diff --git a/controllers/topology/deployment_test.go b/controllers/topology/deployment_test.go new file mode 100644 index 00000000..1f739220 --- /dev/null +++ b/controllers/topology/deployment_test.go @@ -0,0 +1,692 @@ +package topology_test + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + clabernetesconfig "github.com/srl-labs/clabernetes/config" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + + clabernetescontrollerstopology "github.com/srl-labs/clabernetes/controllers/topology" + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + clabernetesutil "github.com/srl-labs/clabernetes/util" + k8sappsv1 "k8s.io/api/apps/v1" + k8scorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apimachinerytypes "k8s.io/apimachinery/pkg/types" +) + +const renderDeploymentTestName = "deployment/render-deployment" + +func TestDeploymentConforms(t *testing.T) { + cases := []struct { + name string + existing *k8sappsv1.Deployment + rendered *k8sappsv1.Deployment + ownerUID apimachinerytypes.UID + conforms bool + }{ + { + name: "simple", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + { + name: "bad-replicas", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Replicas: clabernetesutil.ToPointer(int32(100)), + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Replicas: clabernetesutil.ToPointer(int32(1)), + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-selector", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: 
[]metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "something": "something", + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "something": "different", + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-containers", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{}, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + Spec: k8scorev1.PodSpec{ + Containers: []k8scorev1.Container{ + { + Name: "some-container", + }, + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-service-account", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + Spec: k8scorev1.PodSpec{ + ServiceAccountName: "something-else", + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + Spec: k8scorev1.PodSpec{ + ServiceAccountName: "default", + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-restart-policy", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + Spec: k8scorev1.PodSpec{ + RestartPolicy: "Never", + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + Spec: k8scorev1.PodSpec{ + RestartPolicy: "Always", + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + + // object meta annotations + + { + name: "missing-clabernetes-global-annotations", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelse": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-annotations-wrong-value", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: 
"extra-annotations-ok", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelseentirely": "thisisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + + // object meta labels + + { + name: "missing-clabernetes-global-annotations", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelse": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-labels-wrong-value", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-labels-ok", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelseentirely": "thisisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + + // template object meta annotations + + { + name: "missing-clabernetes-global-annotations", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelse": "xyz", + }, + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-annotations-wrong-value", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "xyz", + }, + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + }, + }, + 
ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-annotations-ok", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelseentirely": "thisisok", + }, + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + + // template object meta labels + + { + name: "missing-clabernetes-global-labels", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelse": "xyz", + }, + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-labels-wrong-value", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "xyz", + }, + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-labels-ok", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelseentirely": "thisisok", + }, + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + + { + name: "bad-owner", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "multiple-owner", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + } + + for _, testCase := range cases { + t.Run( + 
testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + actual := clabernetescontrollerstopology.DeploymentConforms( + testCase.existing, + testCase.rendered, + testCase.ownerUID, + ) + if actual != testCase.conforms { + clabernetestesthelper.FailOutput(t, testCase.existing, testCase.rendered) + } + }) + } +} + +func TestRenderDeployment(t *testing.T) { + cases := []struct { + name string + obj clabernetesapistopologyv1alpha1.TopologyCommonObject + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config + nodeName string + }{ + { + name: "simple", + obj: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-deployment-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: []string{ + "21022:22/tcp", + "21023:23/tcp", + "21161:161/udp", + "33333:57400/tcp", + "60000:21/tcp", + "60001:80/tcp", + "60002:443/tcp", + "60003:830/tcp", + "60004:5000/tcp", + "60005:5900/tcp", + "60006:6030/tcp", + "60007:9339/tcp", + "60008:9340/tcp", + "60009:9559/tcp", + }, + }, + Kinds: nil, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + Image: "ghcr.io/nokia/srlinux", + }, + }, + Links: nil, + }, + Debug: false, + }, + }, + nodeName: "srl1", + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopology.Reconciler{ + ResourceKind: "containerlab", + ConfigManagerGetter: clabernetesconfig.GetFakeManager, + } + + got := reconciler.RenderDeployment( + testCase.obj, + testCase.clabernetesConfigs, + testCase.nodeName, + ) + + if *clabernetestesthelper.Update { + clabernetestesthelper.WriteTestFixtureJSON( + t, + fmt.Sprintf("golden/%s/%s.json", renderDeploymentTestName, testCase.name), + got, + ) + } + + var want k8sappsv1.Deployment + + err := json.Unmarshal( + clabernetestesthelper.ReadTestFixtureFile( + t, + fmt.Sprintf("golden/%s/%s.json", renderDeploymentTestName, testCase.name), + ), + &want, + ) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(got.Annotations, want.Annotations) { + clabernetestesthelper.FailOutput(t, got.Annotations, want.Annotations) + } + if !reflect.DeepEqual(got.Labels, want.Labels) { + clabernetestesthelper.FailOutput(t, got.Labels, want.Labels) + } + if !reflect.DeepEqual(got.Spec, want.Spec) { + clabernetestesthelper.FailOutput(t, got.Spec, want.Spec) + } + }) + } +} diff --git a/controllers/topology/service_test.go b/controllers/topology/service_test.go index be45f0b6..77b57d00 100644 --- a/controllers/topology/service_test.go +++ b/controllers/topology/service_test.go @@ -287,7 +287,7 @@ func TestServiceConforms(t *testing.T) { }, OwnerReferences: []metav1.OwnerReference{ { - UID: apimachinerytypes.UID("evil-imposter"), + UID: apimachinerytypes.UID("clabernetes-testing"), }, }, }, @@ -311,7 +311,7 @@ func TestServiceConforms(t *testing.T) { }, OwnerReferences: []metav1.OwnerReference{ { - UID: apimachinerytypes.UID("evil-imposter"), + UID: 
apimachinerytypes.UID("clabernetes-testing"), }, }, }, @@ -355,7 +355,7 @@ func TestServiceConforms(t *testing.T) { }, OwnerReferences: []metav1.OwnerReference{ { - UID: apimachinerytypes.UID("evil-imposter"), + UID: apimachinerytypes.UID("clabernetes-testing"), }, }, }, @@ -379,7 +379,7 @@ func TestServiceConforms(t *testing.T) { }, OwnerReferences: []metav1.OwnerReference{ { - UID: apimachinerytypes.UID("evil-imposter"), + UID: apimachinerytypes.UID("clabernetes-testing"), }, }, }, diff --git a/controllers/topology/test-fixtures/golden/deployment/render-deployment/simple.json b/controllers/topology/test-fixtures/golden/deployment/render-deployment/simple.json new file mode 100755 index 00000000..0a3cb541 --- /dev/null +++ b/controllers/topology/test-fixtures/golden/deployment/render-deployment/simple.json @@ -0,0 +1,96 @@ +{ + "metadata": { + "name": "render-deployment-test-srl1", + "namespace": "clabernetes", + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "volumes": [ + { + "name": "render-deployment-test-config", + "configMap": { + "name": "render-deployment-test" + } + } + ], + "containers": [ + { + "name": "srl1", + "image": "ghcr.io/srl-labs/clabernetes/clabernetes-launcher:latest", + "command": [ + "/clabernetes/manager", + "launch" + ], + "workingDir": "/clabernetes", + "ports": [ + { + "name": "vxlan", + "containerPort": 14789, + "protocol": "UDP" + } + ], + "env": [ + { + "name": "LAUNCHER_LOGGER_LEVEL", + "value": "info" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/topo.clab.yaml", + "subPath": "srl1" + }, + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/tunnels.yaml", + "subPath": "srl1-tunnels" + } + ], + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "imagePullPolicy": "IfNotPresent", + "securityContext": { + "privileged": true, + "runAsUser": 0 + } + } + ], + "restartPolicy": "Always", + "serviceAccountName": "default" + } + }, + "strategy": {}, + "revisionHistoryLimit": 0 + }, + "status": {} +} \ No newline at end of file From ebfff34641ec73b3c7f04ada05d5be1432727b74 Mon Sep 17 00:00:00 2001 From: Carl Montanari Date: Sat, 21 Oct 2023 15:58:00 -0700 Subject: [PATCH 3/8] refactor: start topology reconciler refactoring (configmap reconciler) --- config/manager.go | 3 + .../topology/containerlab/controller.go | 14 +-- controllers/topology/kne/controller.go | 14 +-- controllers/topology/reconciler.go | 96 +++++++++++++++--- .../{configmap.go => reconciler_configmap.go} | 98 +++++++------------ ...p_test.go => reconciler_configmap_test.go} | 36 +++---- 6 files changed, 149 insertions(+), 112 deletions(-) rename controllers/topology/{configmap.go => reconciler_configmap.go} (61%) 
rename controllers/topology/{configmap_test.go => reconciler_configmap_test.go} (88%) diff --git a/config/manager.go b/config/manager.go index e5c0777f..5568d250 100644 --- a/config/manager.go +++ b/config/manager.go @@ -25,6 +25,9 @@ var ( managerInstanceOnce sync.Once //nolint:gochecknoglobals ) +// ManagerGetterFunc returns an instance of the config manager. +type ManagerGetterFunc func() Manager + // InitManager initializes the config manager -- it does this once only, its a no-op if the manager // is already initialized. func InitManager(ctx context.Context, appName, namespace string, client *kubernetes.Clientset) { diff --git a/controllers/topology/containerlab/controller.go b/controllers/topology/containerlab/controller.go index 1c0e5abc..0622e7c8 100644 --- a/controllers/topology/containerlab/controller.go +++ b/controllers/topology/containerlab/controller.go @@ -41,11 +41,11 @@ func NewController( c := &Controller{ BaseController: baseController, - TopologyReconciler: &clabernetescontrollerstopology.Reconciler{ - Log: baseController.Log, - Client: baseController.Client, - ResourceKind: clabernetesapistopology.Containerlab, - ResourceLister: func( + TopologyReconciler: clabernetescontrollerstopology.NewReconciler( + baseController.Log, + baseController.Client, + clabernetesapistopology.Containerlab, + func( ctx context.Context, client ctrlruntimeclient.Client, ) ([]ctrlruntimeclient.Object, error) { @@ -67,8 +67,8 @@ func NewController( return out, nil }, - ConfigManagerGetter: clabernetesconfig.GetManager, - }, + clabernetesconfig.GetManager, + ), } return c diff --git a/controllers/topology/kne/controller.go b/controllers/topology/kne/controller.go index 66722506..5488e31d 100644 --- a/controllers/topology/kne/controller.go +++ b/controllers/topology/kne/controller.go @@ -43,11 +43,11 @@ func NewController( c := &Controller{ BaseController: baseController, - TopologyReconciler: &clabernetescontrollerstopology.Reconciler{ - Log: baseController.Log, - Client: baseController.Client, - ResourceKind: clabernetesapistopology.Kne, - ResourceLister: func( + TopologyReconciler: clabernetescontrollerstopology.NewReconciler( + baseController.Log, + baseController.Client, + clabernetesapistopology.Kne, + func( ctx context.Context, client ctrlruntimeclient.Client, ) ([]ctrlruntimeclient.Object, error) { @@ -69,8 +69,8 @@ func NewController( return out, nil }, - ConfigManagerGetter: clabernetesconfig.GetManager, - }, + clabernetesconfig.GetManager, + ), } return c diff --git a/controllers/topology/reconciler.go b/controllers/topology/reconciler.go index 1abcddee..d252cc45 100644 --- a/controllers/topology/reconciler.go +++ b/controllers/topology/reconciler.go @@ -4,6 +4,8 @@ import ( "context" "slices" + ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + clabernetesconfig "github.com/srl-labs/clabernetes/config" clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" @@ -18,6 +20,32 @@ import ( ctrlruntimereconcile "sigs.k8s.io/controller-runtime/pkg/reconcile" ) +// ResourceListerFunc represents a function that can list the objects that a topology controller +// is responsible for. +type ResourceListerFunc func( + ctx context.Context, + client ctrlruntimeclient.Client, +) ([]ctrlruntimeclient.Object, error) + +// NewReconciler creates a new generic Reconciler (TopologyReconciler). 
+func NewReconciler( + log claberneteslogging.Instance, + client ctrlruntimeclient.Client, + resourceKind string, + resourceLister ResourceListerFunc, + configManagerGetter clabernetesconfig.ManagerGetterFunc, +) *Reconciler { + return &Reconciler{ + Log: log, + Client: client, + ResourceKind: resourceKind, + ResourceLister: resourceLister, + ConfigManagerGetter: configManagerGetter, + + configMapReconciler: NewConfigMapReconciler(resourceKind, configManagerGetter), + } +} + // Reconciler (TopologyReconciler) is the base clabernetes topology reconciler that is embedded in // all clabernetes topology controllers, it provides common methods for reconciling the // common/standard resources that represent a clabernetes object (configmap, deployments, @@ -26,40 +54,80 @@ type Reconciler struct { Log claberneteslogging.Instance Client ctrlruntimeclient.Client ResourceKind string - ResourceLister func( - ctx context.Context, - client ctrlruntimeclient.Client, - ) ([]ctrlruntimeclient.Object, error) + ResourceLister ResourceListerFunc + + // TODO this should be deleted once we make the sub reconcilers ConfigManagerGetter func() clabernetesconfig.Manager + + configMapReconciler *ConfigMapReconciler + deploymentReconciler *deploymentReconciler + serviceReconciler *serviceReconciler +} + +type ( + deploymentReconciler struct{} + serviceReconciler struct{} +) + +func (r *Reconciler) createObj( + ctx context.Context, + ownerObj, + renderedObj ctrlruntimeclient.Object, +) error { + err := ctrlruntimeutil.SetOwnerReference(ownerObj, renderedObj, r.Client.Scheme()) + if err != nil { + return err + } + + return r.Client.Create(ctx, renderedObj) } // ReconcileConfigMap reconciles the primary configmap containing clabernetes configs and tunnel // information. func (r *Reconciler) ReconcileConfigMap( ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel, ) error { - configMap := &k8scorev1.ConfigMap{} + namespacedName := apimachinerytypes.NamespacedName{ + Namespace: owningTopology.GetNamespace(), + Name: owningTopology.GetName(), + } + + renderedConfigMap, err := r.configMapReconciler.Render( + namespacedName, + clabernetesConfigs, + tunnels, + ) + if err != nil { + return err + } + + existingConfigMap := &k8scorev1.ConfigMap{} - err := r.Client.Get( + err = r.Client.Get( ctx, - apimachinerytypes.NamespacedName{ - Name: obj.GetName(), - Namespace: obj.GetNamespace(), - }, - configMap, + namespacedName, + existingConfigMap, ) if err != nil { if apimachineryerrors.IsNotFound(err) { - return r.createConfigMap(ctx, obj, clabernetesConfigs, tunnels) + return r.createObj(ctx, owningTopology, renderedConfigMap) } return err } - return r.enforceConfigMap(ctx, obj, clabernetesConfigs, tunnels, configMap) + if r.configMapReconciler.Conforms( + existingConfigMap, + renderedConfigMap, + owningTopology.GetUID(), + ) { + return nil + } + + return r.Client.Update(ctx, renderedConfigMap) } // ReconcileDeployments reconciles the deployments that make up a clabernetes Topology. 
diff --git a/controllers/topology/configmap.go b/controllers/topology/reconciler_configmap.go similarity index 61% rename from controllers/topology/configmap.go rename to controllers/topology/reconciler_configmap.go index d8b88ae8..6a3a1181 100644 --- a/controllers/topology/configmap.go +++ b/controllers/topology/reconciler_configmap.go @@ -1,48 +1,61 @@ package topology import ( - "context" "fmt" "reflect" - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" - apimachinerytypes "k8s.io/apimachinery/pkg/types" - - clabernetesconstants "github.com/srl-labs/clabernetes/constants" - clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" "gopkg.in/yaml.v3" k8scorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + apimachinerytypes "k8s.io/apimachinery/pkg/types" ) -// RenderConfigMap accepts an object (just for name/namespace reasons) and a mapping of clabernetes +// NewConfigMapReconciler returns an instance of ConfigMapReconciler. +func NewConfigMapReconciler( + resourceKind string, + configManagerGetter clabernetesconfig.ManagerGetterFunc, +) *ConfigMapReconciler { + return &ConfigMapReconciler{ + resourceKind: resourceKind, + configManagerGetter: configManagerGetter, + } +} + +// ConfigMapReconciler is a subcomponent of the "TopologyReconciler" but is exposed for testing +// purposes. This is the component responsible for rendering/validating configmaps for a +// clabernetes topology resource. +type ConfigMapReconciler struct { + resourceKind string + configManagerGetter clabernetesconfig.ManagerGetterFunc +} + +// Render accepts an object (just for name/namespace reasons) and a mapping of clabernetes // sub-topology configs and tunnels and renders the final configmap for the deployment -- this is // the configmap that will ultimately be referenced when mounting sub-topologies and tunnel data in // the clabernetes launcher pod(s) for a given topology. 
-func (r *Reconciler) RenderConfigMap( - obj ctrlruntimeclient.Object, +func (r *ConfigMapReconciler) Render( + namespacedName apimachinerytypes.NamespacedName, clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel, ) (*k8scorev1.ConfigMap, error) { - configManager := r.ConfigManagerGetter() + configManager := r.configManagerGetter() globalAnnotations, globalLabels := configManager.GetAllMetadata() - configMapName := obj.GetName() - configMap := &k8scorev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: configMapName, - Namespace: obj.GetNamespace(), + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, Annotations: globalAnnotations, Labels: map[string]string{ clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, - clabernetesconstants.LabelName: configMapName, - clabernetesconstants.LabelTopologyOwner: configMapName, - clabernetesconstants.LabelTopologyKind: r.ResourceKind, + clabernetesconstants.LabelName: namespacedName.Name, + clabernetesconstants.LabelTopologyOwner: namespacedName.Name, + clabernetesconstants.LabelTopologyKind: r.resourceKind, }, }, Data: map[string]string{}, @@ -78,51 +91,8 @@ func (r *Reconciler) RenderConfigMap( return configMap, nil } -func (r *Reconciler) createConfigMap( - ctx context.Context, - obj ctrlruntimeclient.Object, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel, -) error { - configMap, err := r.RenderConfigMap(obj, clabernetesConfigs, tunnels) - if err != nil { - return err - } - - err = ctrlruntimeutil.SetOwnerReference(obj, configMap, r.Client.Scheme()) - if err != nil { - return err - } - - return r.Client.Create(ctx, configMap) -} - -func (r *Reconciler) enforceConfigMap( - ctx context.Context, - obj ctrlruntimeclient.Object, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel, - actual *k8scorev1.ConfigMap, -) error { - configMap, err := r.RenderConfigMap(obj, clabernetesConfigs, tunnels) - if err != nil { - return err - } - - err = ctrlruntimeutil.SetOwnerReference(obj, configMap, r.Client.Scheme()) - if err != nil { - return err - } - - if configMapConforms(actual, configMap, obj.GetUID()) { - // nothing to do - return nil - } - - return r.Client.Update(ctx, configMap) -} - -func configMapConforms( +// Conforms checks if the existingConfigMap conforms with the renderedConfigMap. 
+func (r *ConfigMapReconciler) Conforms( existingConfigMap, renderedConfigMap *k8scorev1.ConfigMap, expectedOwnerUID apimachinerytypes.UID, diff --git a/controllers/topology/configmap_test.go b/controllers/topology/reconciler_configmap_test.go similarity index 88% rename from controllers/topology/configmap_test.go rename to controllers/topology/reconciler_configmap_test.go index c93edaa2..86e074f0 100644 --- a/controllers/topology/configmap_test.go +++ b/controllers/topology/reconciler_configmap_test.go @@ -6,6 +6,9 @@ import ( "reflect" "testing" + clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" + apimachinerytypes "k8s.io/apimachinery/pkg/types" + clabernetesconfig "github.com/srl-labs/clabernetes/config" k8scorev1 "k8s.io/api/core/v1" @@ -15,9 +18,6 @@ import ( clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" clabernetesutil "github.com/srl-labs/clabernetes/util" clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) var defaultPorts = []string{ @@ -44,17 +44,15 @@ const renderConfigMapTestName = "configmap/render-config-map" func TestRenderConfigMap(t *testing.T) { cases := []struct { name string - obj ctrlruntimeclient.Object + namespacedName apimachinerytypes.NamespacedName clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel }{ { name: "basic-two-node-with-links", - obj: &clabernetesapistopologyv1alpha1.Containerlab{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-configmap", - Namespace: "nowhere", - }, + namespacedName: apimachinerytypes.NamespacedName{ + Name: "test-configmap", + Namespace: "nowhere", }, clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ "srl1": { @@ -133,11 +131,9 @@ func TestRenderConfigMap(t *testing.T) { }, { name: "basic-two-node-no-links", - obj: &clabernetesapistopologyv1alpha1.Containerlab{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-configmap", - Namespace: "nowhere", - }, + namespacedName: apimachinerytypes.NamespacedName{ + Name: "test-configmap", + Namespace: "nowhere", }, clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ "srl1": { @@ -186,13 +182,13 @@ func TestRenderConfigMap(t *testing.T) { func(t *testing.T) { t.Logf("%s: starting", testCase.name) - reconciler := clabernetescontrollerstopology.Reconciler{ - ResourceKind: "containerlab", - ConfigManagerGetter: clabernetesconfig.GetFakeManager, - } + reconciler := clabernetescontrollerstopology.NewConfigMapReconciler( + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) - got, err := reconciler.RenderConfigMap( - testCase.obj, + got, err := reconciler.Render( + testCase.namespacedName, testCase.clabernetesConfigs, testCase.tunnels, ) From 6ab7324189a118436527eaeca2c1b2f8ea8b8f79 Mon Sep 17 00:00:00 2001 From: Carl Montanari Date: Sat, 21 Oct 2023 17:34:59 -0700 Subject: [PATCH 4/8] refactor: topology reconciler refactor (deployment) --- controllers/deployments.go | 11 + controllers/topology/deployment.go | 630 ------------------ controllers/topology/reconciler.go | 209 ++++-- controllers/topology/reconciler_crud.go | 111 +++ controllers/topology/reconciler_deployment.go | 526 +++++++++++++++ ..._test.go => reconciler_deployment_test.go} | 19 +- 6 files changed, 832 insertions(+), 674 deletions(-) delete mode 100644 controllers/topology/deployment.go create mode 100644 
controllers/topology/reconciler_crud.go create mode 100644 controllers/topology/reconciler_deployment.go rename controllers/topology/{deployment_test.go => reconciler_deployment_test.go} (97%) diff --git a/controllers/deployments.go b/controllers/deployments.go index 03bd2751..0854a5f9 100644 --- a/controllers/deployments.go +++ b/controllers/deployments.go @@ -58,3 +58,14 @@ func ContainersEqual(existing, rendered []k8scorev1.Container) bool { return true } + +// VolumeAlreadyMounted checks if the given volumeName is already in the existingVolumes. +func VolumeAlreadyMounted(volumeName string, existingVolumes []k8scorev1.Volume) bool { + for idx := range existingVolumes { + if volumeName == existingVolumes[idx].Name { + return true + } + } + + return false +} diff --git a/controllers/topology/deployment.go b/controllers/topology/deployment.go deleted file mode 100644 index d5c084bb..00000000 --- a/controllers/topology/deployment.go +++ /dev/null @@ -1,630 +0,0 @@ -package topology - -import ( - "context" - "fmt" - "reflect" - "strings" - "time" - - clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - - clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" - clabernetesconstants "github.com/srl-labs/clabernetes/constants" - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" - claberneteserrors "github.com/srl-labs/clabernetes/errors" - clabernetesutil "github.com/srl-labs/clabernetes/util" - k8sappsv1 "k8s.io/api/apps/v1" - k8scorev1 "k8s.io/api/core/v1" - apimachineryerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apimachinerytypes "k8s.io/apimachinery/pkg/types" - - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -func (r *Reconciler) resolveDeployments( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, -) (*clabernetescontrollers.ResolvedDeployments, error) { - ownedDeployments := &k8sappsv1.DeploymentList{} - - err := r.Client.List( - ctx, - ownedDeployments, - ctrlruntimeclient.InNamespace(obj.GetNamespace()), - ctrlruntimeclient.MatchingLabels{ - clabernetesconstants.LabelTopologyOwner: obj.GetName(), - }, - ) - if err != nil { - r.Log.Criticalf("failed fetching owned deployments, error: '%s'", err) - - return nil, err - } - - deployments := &clabernetescontrollers.ResolvedDeployments{ - Current: map[string]*k8sappsv1.Deployment{}, - } - - for i := range ownedDeployments.Items { - labels := ownedDeployments.Items[i].Labels - - if labels == nil { - return nil, fmt.Errorf( - "%w: labels are nil, but we expect to see topology owner label here", - claberneteserrors.ErrInvalidData, - ) - } - - nodeName, ok := labels[clabernetesconstants.LabelTopologyNode] - if !ok || nodeName == "" { - return nil, fmt.Errorf( - "%w: topology node label is missing or empty", - claberneteserrors.ErrInvalidData, - ) - } - - deployments.Current[nodeName] = &ownedDeployments.Items[i] - } - - allNodes := make([]string, len(clabernetesConfigs)) - - var nodeIdx int - - for nodeName := range clabernetesConfigs { - allNodes[nodeIdx] = nodeName - - nodeIdx++ - } - - deployments.Missing = clabernetesutil.StringSliceDifference( - deployments.CurrentDeploymentNames(), - allNodes, - ) - - r.Log.Debugf( - "deployments are missing for the following nodes: %s", - deployments.Missing, - ) - - 
extraEndpointDeployments := clabernetesutil.StringSliceDifference( - allNodes, - deployments.CurrentDeploymentNames(), - ) - - r.Log.Debugf( - "extraneous deployments exist for following nodes: %s", - extraEndpointDeployments, - ) - - deployments.Extra = make([]*k8sappsv1.Deployment, len(extraEndpointDeployments)) - - for idx, endpoint := range extraEndpointDeployments { - deployments.Extra[idx] = deployments.Current[endpoint] - } - - return deployments, nil -} - -func (r *Reconciler) pruneDeployments( - ctx context.Context, - deployments *clabernetescontrollers.ResolvedDeployments, -) error { - r.Log.Info("pruning extraneous deployments") - - for _, extraDeployment := range deployments.Extra { - r.Log.Debugf( - "removing deployment '%s/%s'", - extraDeployment.Namespace, - extraDeployment.Name, - ) - - err := r.Client.Delete(ctx, extraDeployment) - if err != nil { - r.Log.Criticalf( - "failed removing deployment '%s/%s' error: %s", - extraDeployment.Namespace, - extraDeployment.Name, - err, - ) - - return err - } - } - - return nil -} - -func (r *Reconciler) enforceDeployments( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - deployments *clabernetescontrollers.ResolvedDeployments, -) error { - // handle missing deployments - r.Log.Info("creating missing deployments") - - for _, nodeName := range deployments.Missing { - deployment := r.RenderDeployment( - obj, - clabernetesConfigs, - nodeName, - ) - - err := ctrlruntimeutil.SetOwnerReference(obj, deployment, r.Client.Scheme()) - if err != nil { - return err - } - - r.Log.Debugf( - "creating deployment '%s/%s'", - deployment.Namespace, - deployment.Name, - ) - - err = r.Client.Create(ctx, deployment) - if err != nil { - r.Log.Criticalf( - "failed creating deployment '%s/%s' error: %s", - deployment.Namespace, - deployment.Name, - err, - ) - - return err - } - } - - // compare and update existing deployments if we need to - r.Log.Info("enforcing desired state on existing deployments") - - for nodeName, deployment := range deployments.Current { - r.Log.Debugf( - "comparing existing deployment '%s/%s' to desired state", - deployment.Namespace, - deployment.Name, - ) - - expectedDeployment := r.RenderDeployment( - obj, - clabernetesConfigs, - nodeName, - ) - - err := ctrlruntimeutil.SetOwnerReference(obj, expectedDeployment, r.Client.Scheme()) - if err != nil { - return err - } - - if !DeploymentConforms(deployment, expectedDeployment, obj.GetUID()) { - r.Log.Debugf( - "comparing existing deployment '%s/%s' spec does not conform to desired state, "+ - "updating", - deployment.Namespace, - deployment.Name, - ) - - err = r.Client.Update(ctx, expectedDeployment) - if err != nil { - r.Log.Criticalf( - "failed updating deployment '%s/%s' error: %s", - expectedDeployment.Namespace, - expectedDeployment.Name, - err, - ) - - return err - } - } - } - - return nil -} - -func (r *Reconciler) restartDeploymentForNode( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - nodeName string, -) error { - deploymentName := fmt.Sprintf("%s-%s", obj.GetName(), nodeName) - - nodeDeployment := &k8sappsv1.Deployment{} - - err := r.Client.Get( - ctx, - apimachinerytypes.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: deploymentName, - }, - nodeDeployment, - ) - if err != nil { - if apimachineryerrors.IsNotFound(err) { - r.Log.Warnf( - "could not find deployment '%s', cannot restart after config change,"+ - " this should not 
happen", - deploymentName, - ) - - return nil - } - - return err - } - - if nodeDeployment.Spec.Template.ObjectMeta.Annotations == nil { - nodeDeployment.Spec.Template.ObjectMeta.Annotations = map[string]string{} - } - - now := time.Now().Format(time.RFC3339) - - nodeDeployment.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = now - - return r.Client.Update(ctx, nodeDeployment) -} - -// RenderDeployment renders a k8sappsv1.Deployment object based on the given clabernetes topology -// object and clabernetes config mapping (sub-topologies) for the given node name. -func (r *Reconciler) RenderDeployment( - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - nodeName string, -) *k8sappsv1.Deployment { - globalAnnotations, globalLabels := r.ConfigManagerGetter().GetAllMetadata() - - name := obj.GetName() - - deploymentName := fmt.Sprintf("%s-%s", name, nodeName) - configVolumeName := fmt.Sprintf("%s-config", name) - - // match labels are immutable and dont matter if they have the users provided "global" labels, - // so make those first then copy those into "normal" labels and add the other stuff - matchLabels := map[string]string{ - clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, - clabernetesconstants.LabelName: deploymentName, - clabernetesconstants.LabelTopologyOwner: name, - clabernetesconstants.LabelTopologyNode: nodeName, - } - - labels := make(map[string]string) - - for k, v := range matchLabels { - labels[k] = v - } - - for k, v := range globalLabels { - labels[k] = v - } - - commonSpec := obj.GetTopologyCommonSpec() - - launcherLogLevel := clabernetesutil.GetEnvStrOrDefault( - clabernetesconstants.LauncherLoggerLevelEnv, - clabernetesconstants.Info, - ) - - if commonSpec.LauncherLogLevel != "" { - launcherLogLevel = commonSpec.LauncherLogLevel - } - - deployment := &k8sappsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - Namespace: obj.GetNamespace(), - Annotations: globalAnnotations, - Labels: labels, - }, - Spec: k8sappsv1.DeploymentSpec{ - Replicas: clabernetesutil.ToPointer(int32(1)), - RevisionHistoryLimit: clabernetesutil.ToPointer(int32(0)), - Selector: &metav1.LabelSelector{ - MatchLabels: matchLabels, - }, - Template: k8scorev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: globalAnnotations, - Labels: labels, - }, - Spec: k8scorev1.PodSpec{ - Containers: []k8scorev1.Container{ - { - Name: nodeName, - WorkingDir: "/clabernetes", - Image: clabernetesutil.GetEnvStrOrDefault( - clabernetesconstants.LauncherImageEnv, - clabernetesconstants.LauncherDefaultImage, - ), - Command: []string{"/clabernetes/manager", "launch"}, - Ports: []k8scorev1.ContainerPort{ - { - Name: "vxlan", - ContainerPort: clabernetesconstants.VXLANServicePort, - Protocol: "UDP", - }, - }, - VolumeMounts: []k8scorev1.VolumeMount{ - { - Name: configVolumeName, - ReadOnly: true, - MountPath: "/clabernetes/topo.clab.yaml", - SubPath: nodeName, - }, - { - Name: configVolumeName, - ReadOnly: true, - MountPath: "/clabernetes/tunnels.yaml", - SubPath: fmt.Sprintf("%s-tunnels", nodeName), - }, - }, - TerminationMessagePath: "/dev/termination-log", - TerminationMessagePolicy: "File", - ImagePullPolicy: k8scorev1.PullPolicy( - clabernetesutil.GetEnvStrOrDefault( - clabernetesconstants.LauncherPullPolicyEnv, - "IfNotPresent", - ), - ), - SecurityContext: &k8scorev1.SecurityContext{ - // obviously we need privileged for dind setup - Privileged: 
clabernetesutil.ToPointer(true), - RunAsUser: clabernetesutil.ToPointer(int64(0)), - }, - Env: []k8scorev1.EnvVar{ - { - Name: clabernetesconstants.LauncherLoggerLevelEnv, - Value: launcherLogLevel, - }, - }, - }, - }, - RestartPolicy: "Always", - ServiceAccountName: "default", - Volumes: []k8scorev1.Volume{ - { - Name: configVolumeName, - VolumeSource: k8scorev1.VolumeSource{ - ConfigMap: &k8scorev1.ConfigMapVolumeSource{ - LocalObjectReference: k8scorev1.LocalObjectReference{ - Name: name, - }, - }, - }, - }, - }, - }, - }, - }, - } - - if commonSpec.ContainerlabDebug { - deployment.Spec.Template.Spec.Containers[0].Env = append( - deployment.Spec.Template.Spec.Containers[0].Env, - k8scorev1.EnvVar{ - Name: clabernetesconstants.LauncherContainerlabDebug, - Value: clabernetesconstants.True, - }, - ) - } - - deployment = renderDeploymentAddFilesFromConfigMaps(nodeName, obj, deployment) - - deployment = renderDeploymentAddInsecureRegistries(obj, deployment) - - deployment = r.renderDeploymentAddResources(obj, clabernetesConfigs, nodeName, deployment) - - return deployment -} - -func volumeAlreadyMounted(volumeName string, existingVolumes []k8scorev1.Volume) bool { - for idx := range existingVolumes { - if volumeName == existingVolumes[idx].Name { - return true - } - } - - return false -} - -func renderDeploymentAddFilesFromConfigMaps( - nodeName string, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - deployment *k8sappsv1.Deployment, -) *k8sappsv1.Deployment { - podVolumes := make([]clabernetesapistopologyv1alpha1.FileFromConfigMap, 0) - - for _, fileFromConfigMap := range obj.GetTopologyCommonSpec().FilesFromConfigMap { - if fileFromConfigMap.NodeName != nodeName { - continue - } - - podVolumes = append(podVolumes, fileFromConfigMap) - } - - for _, podVolume := range podVolumes { - if !volumeAlreadyMounted(podVolume.ConfigMapName, deployment.Spec.Template.Spec.Volumes) { - deployment.Spec.Template.Spec.Volumes = append( - deployment.Spec.Template.Spec.Volumes, - k8scorev1.Volume{ - Name: podVolume.ConfigMapName, - VolumeSource: k8scorev1.VolumeSource{ - ConfigMap: &k8scorev1.ConfigMapVolumeSource{ - LocalObjectReference: k8scorev1.LocalObjectReference{ - Name: podVolume.ConfigMapName, - }, - }, - }, - }, - ) - } - - volumeMount := k8scorev1.VolumeMount{ - Name: podVolume.ConfigMapName, - ReadOnly: false, - MountPath: fmt.Sprintf("/clabernetes/%s", podVolume.FilePath), - SubPath: podVolume.ConfigMapPath, - } - - deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append( - deployment.Spec.Template.Spec.Containers[0].VolumeMounts, - volumeMount, - ) - } - - return deployment -} - -func renderDeploymentAddInsecureRegistries( - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - deployment *k8sappsv1.Deployment, -) *k8sappsv1.Deployment { - insecureRegistries := obj.GetTopologyCommonSpec().InsecureRegistries - - if len(insecureRegistries) > 0 { - deployment.Spec.Template.Spec.Containers[0].Env = append( - deployment.Spec.Template.Spec.Containers[0].Env, - k8scorev1.EnvVar{ - Name: clabernetesconstants.LauncherInsecureRegistries, - Value: strings.Join(insecureRegistries, ","), - }, - ) - } - - return deployment -} - -func (r *Reconciler) renderDeploymentAddResources( - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - nodeName string, - deployment *k8sappsv1.Deployment, -) *k8sappsv1.Deployment { - commonSpec := obj.GetTopologyCommonSpec() - - nodeResources, nodeResourcesOk := 
commonSpec.Resources[nodeName] - if nodeResourcesOk { - deployment.Spec.Template.Spec.Containers[0].Resources = nodeResources - - return deployment - } - - defaultResources, defaultResourcesOk := commonSpec.Resources[clabernetesconstants.Default] - if defaultResourcesOk { - deployment.Spec.Template.Spec.Containers[0].Resources = defaultResources - - return deployment - } - - resources := r.ConfigManagerGetter().GetResourcesForContainerlabKind( - clabernetesConfigs[nodeName].Topology.GetNodeKindType(nodeName), - ) - - if resources != nil { - deployment.Spec.Template.Spec.Containers[0].Resources = *resources - } - - return deployment -} - -// DeploymentConforms asserts if a given deployment conforms with a rendered deployment -- this -// isn't checking if the services are exactly the same, just checking that the parts clabernetes -// cares about are the same. -func DeploymentConforms( - existingDeployment, - renderedDeployment *k8sappsv1.Deployment, - expectedOwnerUID apimachinerytypes.UID, -) bool { - if !reflect.DeepEqual(existingDeployment.Spec.Replicas, renderedDeployment.Spec.Replicas) { - return false - } - - if !reflect.DeepEqual(existingDeployment.Spec.Selector, renderedDeployment.Spec.Selector) { - return false - } - - if !clabernetescontrollers.ContainersEqual( - existingDeployment.Spec.Template.Spec.Containers, - renderedDeployment.Spec.Template.Spec.Containers, - ) { - return false - } - - if !reflect.DeepEqual( - existingDeployment.Spec.Template.Spec.ServiceAccountName, - renderedDeployment.Spec.Template.Spec.ServiceAccountName, - ) { - return false - } - - if !reflect.DeepEqual( - existingDeployment.Spec.Template.Spec.RestartPolicy, - renderedDeployment.Spec.Template.Spec.RestartPolicy, - ) { - return false - } - - if !clabernetescontrollers.AnnotationsOrLabelsConform( - existingDeployment.ObjectMeta.Annotations, - renderedDeployment.ObjectMeta.Annotations, - ) { - return false - } - - if !clabernetescontrollers.AnnotationsOrLabelsConform( - existingDeployment.ObjectMeta.Labels, - renderedDeployment.ObjectMeta.Labels, - ) { - return false - } - - if !clabernetescontrollers.AnnotationsOrLabelsConform( - existingDeployment.Spec.Template.ObjectMeta.Annotations, - renderedDeployment.Spec.Template.ObjectMeta.Annotations, - ) { - return false - } - - if !clabernetescontrollers.AnnotationsOrLabelsConform( - existingDeployment.Spec.Template.ObjectMeta.Labels, - renderedDeployment.Spec.Template.ObjectMeta.Labels, - ) { - return false - } - - if len(existingDeployment.ObjectMeta.OwnerReferences) != 1 { - // we should have only one owner reference, the extractor - return false - } - - if existingDeployment.ObjectMeta.OwnerReferences[0].UID != expectedOwnerUID { - // owner ref uid is not us - return false - } - - return true -} - -func determineNodesNeedingRestart( - preReconcileConfigs, - configs map[string]*clabernetesutilcontainerlab.Config, -) []string { - var nodesNeedingRestart []string - - for nodeName, nodeConfig := range configs { - _, nodeExistedBefore := preReconcileConfigs[nodeName] - if !nodeExistedBefore { - continue - } - - if !reflect.DeepEqual(nodeConfig, preReconcileConfigs[nodeName]) { - nodesNeedingRestart = append( - nodesNeedingRestart, - nodeName, - ) - } - } - - return nodesNeedingRestart -} diff --git a/controllers/topology/reconciler.go b/controllers/topology/reconciler.go index d252cc45..80b02870 100644 --- a/controllers/topology/reconciler.go +++ b/controllers/topology/reconciler.go @@ -2,7 +2,14 @@ package topology import ( "context" + "fmt" "slices" + "time" + 
+ clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" + + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + k8sappsv1 "k8s.io/api/apps/v1" ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -42,7 +49,8 @@ func NewReconciler( ResourceLister: resourceLister, ConfigManagerGetter: configManagerGetter, - configMapReconciler: NewConfigMapReconciler(resourceKind, configManagerGetter), + configMapReconciler: NewConfigMapReconciler(resourceKind, configManagerGetter), + deploymentReconciler: NewDeploymentReconciler(resourceKind, configManagerGetter), } } @@ -60,26 +68,7 @@ type Reconciler struct { ConfigManagerGetter func() clabernetesconfig.Manager configMapReconciler *ConfigMapReconciler - deploymentReconciler *deploymentReconciler - serviceReconciler *serviceReconciler -} - -type ( - deploymentReconciler struct{} - serviceReconciler struct{} -) - -func (r *Reconciler) createObj( - ctx context.Context, - ownerObj, - renderedObj ctrlruntimeclient.Object, -) error { - err := ctrlruntimeutil.SetOwnerReference(ownerObj, renderedObj, r.Client.Scheme()) - if err != nil { - return err - } - - return r.Client.Create(ctx, renderedObj) + deploymentReconciler *DeploymentReconciler } // ReconcileConfigMap reconciles the primary configmap containing clabernetes configs and tunnel @@ -127,32 +116,63 @@ func (r *Reconciler) ReconcileConfigMap( return nil } - return r.Client.Update(ctx, renderedConfigMap) + return r.updateObj(ctx, renderedConfigMap) } -// ReconcileDeployments reconciles the deployments that make up a clabernetes Topology. -func (r *Reconciler) ReconcileDeployments( +func (r *Reconciler) reconcileDeploymentsResolve( ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - preReconcileConfigs, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, -) error { - deployments, err := r.resolveDeployments(ctx, obj, clabernetesConfigs) - if err != nil { - return err - } + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, +) (*clabernetescontrollers.ResolvedDeployments, error) { + ownedDeployments := &k8sappsv1.DeploymentList{} - err = r.pruneDeployments(ctx, deployments) + err := r.Client.List( + ctx, + ownedDeployments, + ctrlruntimeclient.InNamespace(owningTopology.GetNamespace()), + ctrlruntimeclient.MatchingLabels{ + clabernetesconstants.LabelTopologyOwner: owningTopology.GetName(), + }, + ) if err != nil { - return err + r.Log.Criticalf("failed fetching owned deployments, error: '%s'", err) + + return nil, err } - err = r.enforceDeployments(ctx, obj, clabernetesConfigs, deployments) + deployments, err := r.deploymentReconciler.Resolve(ownedDeployments, currentClabernetesConfigs) if err != nil { - return err + r.Log.Criticalf("failed resolving owned deployments, error: '%s'", err) + + return nil, err } - nodesNeedingRestart := determineNodesNeedingRestart(preReconcileConfigs, clabernetesConfigs) + r.Log.Debugf( + "deployments are missing for the following nodes: %s", + deployments.Missing, + ) + + r.Log.Debugf( + "extraneous deployments exist for following nodes: %s", + deployments.Extra, + ) + + return deployments, nil +} + +func (r *Reconciler) reconcileDeploymentsHandleRestarts( + ctx context.Context, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + previousClabernetesConfigs, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + deployments 
*clabernetescontrollers.ResolvedDeployments, +) error { + r.Log.Info("determining nodes needing restart") + + nodesNeedingRestart := r.deploymentReconciler.DetermineNodesNeedingRestart( + previousClabernetesConfigs, + currentClabernetesConfigs, + ) if len(nodesNeedingRestart) == 0 { return nil } @@ -168,7 +188,41 @@ func (r *Reconciler) ReconcileDeployments( nodesNeedingRestart, ) - err = r.restartDeploymentForNode(ctx, obj, nodeName) + deploymentName := fmt.Sprintf("%s-%s", owningTopology.GetName(), nodeName) + + nodeDeployment := &k8sappsv1.Deployment{} + + err := r.getObj( + ctx, + nodeDeployment, + apimachinerytypes.NamespacedName{ + Namespace: owningTopology.GetNamespace(), + Name: deploymentName, + }, + ) + if err != nil { + if apimachineryerrors.IsNotFound(err) { + r.Log.Warnf( + "could not find deployment '%s', cannot restart after config change,"+ + " this should not happen", + deploymentName, + ) + + return nil + } + + return err + } + + if nodeDeployment.Spec.Template.ObjectMeta.Annotations == nil { + nodeDeployment.Spec.Template.ObjectMeta.Annotations = map[string]string{} + } + + now := time.Now().Format(time.RFC3339) + + nodeDeployment.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = now //nolint:lll + + err = r.updateObj(ctx, nodeDeployment) if err != nil { return err } @@ -177,6 +231,85 @@ func (r *Reconciler) ReconcileDeployments( return nil } +// ReconcileDeployments reconciles the deployments that make up a clabernetes Topology. +func (r *Reconciler) ReconcileDeployments( + ctx context.Context, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + previousClabernetesConfigs, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, +) error { + deployments, err := r.reconcileDeploymentsResolve( + ctx, + owningTopology, + currentClabernetesConfigs, + ) + if err != nil { + return err + } + + r.Log.Info("pruning extraneous deployments") + + for _, extraDeployment := range deployments.Extra { + err = r.deleteObj(ctx, extraDeployment) + if err != nil { + return err + } + } + + r.Log.Info("creating missing deployments") + + renderedMissingDeployments := r.deploymentReconciler.RenderAll( + owningTopology, + currentClabernetesConfigs, + deployments.Missing, + ) + + for _, renderedMissingDeployment := range renderedMissingDeployments { + err = r.createObj(ctx, owningTopology, renderedMissingDeployment) + if err != nil { + return err + } + } + + r.Log.Info("enforcing desired state on existing deployments") + + for existingCurrentDeploymentNodeName, existingCurrentDeployment := range deployments.Current { + renderedCurrentDeployment := r.deploymentReconciler.Render( + owningTopology, + currentClabernetesConfigs, + existingCurrentDeploymentNodeName, + ) + + err = ctrlruntimeutil.SetOwnerReference( + owningTopology, + renderedCurrentDeployment, + r.Client.Scheme(), + ) + if err != nil { + return err + } + + if !r.deploymentReconciler.Conforms( + existingCurrentDeployment, + renderedCurrentDeployment, + owningTopology.GetUID(), + ) { + err = r.updateObj(ctx, renderedCurrentDeployment) + if err != nil { + return err + } + } + } + + return r.reconcileDeploymentsHandleRestarts( + ctx, + owningTopology, + previousClabernetesConfigs, + currentClabernetesConfigs, + deployments, + ) +} + // ReconcileServiceFabric reconciles the service used for "fabric" (inter node) connectivity. 
func (r *Reconciler) ReconcileServiceFabric( ctx context.Context, diff --git a/controllers/topology/reconciler_crud.go b/controllers/topology/reconciler_crud.go new file mode 100644 index 00000000..1bc33a05 --- /dev/null +++ b/controllers/topology/reconciler_crud.go @@ -0,0 +1,111 @@ +package topology + +import ( + "context" + + apimachinerytypes "k8s.io/apimachinery/pkg/types" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func (r *Reconciler) createObj( + ctx context.Context, + ownerObj, + renderedObj ctrlruntimeclient.Object, +) error { + err := ctrlruntimeutil.SetOwnerReference(ownerObj, renderedObj, r.Client.Scheme()) + if err != nil { + return err + } + + r.Log.Debugf( + "creating %s '%s/%s'", + renderedObj.GetObjectKind().GroupVersionKind().Kind, + renderedObj.GetNamespace(), + renderedObj.GetName(), + ) + + err = r.Client.Create(ctx, renderedObj) + if err != nil { + r.Log.Criticalf( + "failed creating %s '%s/%s' error: %s", + renderedObj.GetObjectKind().GroupVersionKind().Kind, + renderedObj.GetNamespace(), + renderedObj.GetName(), + err, + ) + + return err + } + + return nil +} + +func (r *Reconciler) getObj( + ctx context.Context, + getObj ctrlruntimeclient.Object, + namespacedName apimachinerytypes.NamespacedName, +) error { + r.Log.Debugf( + "getting %s '%s/%s'", + getObj.GetObjectKind().GroupVersionKind().Kind, + getObj.GetNamespace(), + getObj.GetName(), + ) + + return r.Client.Get(ctx, namespacedName, getObj) +} + +func (r *Reconciler) updateObj( + ctx context.Context, + updateObj ctrlruntimeclient.Object, +) error { + r.Log.Debugf( + "updating %s '%s/%s'", + updateObj.GetObjectKind().GroupVersionKind().Kind, + updateObj.GetNamespace(), + updateObj.GetName(), + ) + + err := r.Client.Update(ctx, updateObj) + if err != nil { + r.Log.Criticalf( + "failed updating %s '%s/%s' error: %s", + updateObj.GetObjectKind().GroupVersionKind().Kind, + updateObj.GetNamespace(), + updateObj.GetName(), + err, + ) + + return err + } + + return nil +} + +func (r *Reconciler) deleteObj( + ctx context.Context, + deleteObj ctrlruntimeclient.Object, +) error { + r.Log.Debugf( + "deleting %s '%s/%s'", + deleteObj.GetObjectKind().GroupVersionKind().Kind, + deleteObj.GetNamespace(), + deleteObj.GetName(), + ) + + err := r.Client.Delete(ctx, deleteObj) + if err != nil { + r.Log.Criticalf( + "failed deleting %s '%s/%s' error: %s", + deleteObj.GetObjectKind().GroupVersionKind().Kind, + deleteObj.GetNamespace(), + deleteObj.GetName(), + err, + ) + + return err + } + + return nil +} diff --git a/controllers/topology/reconciler_deployment.go b/controllers/topology/reconciler_deployment.go new file mode 100644 index 00000000..ae073c4d --- /dev/null +++ b/controllers/topology/reconciler_deployment.go @@ -0,0 +1,526 @@ +package topology + +import ( + "fmt" + "reflect" + "strings" + + apimachinerytypes "k8s.io/apimachinery/pkg/types" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" + claberneteserrors "github.com/srl-labs/clabernetes/errors" + clabernetesutil "github.com/srl-labs/clabernetes/util" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + k8sappsv1 "k8s.io/api/apps/v1" + k8scorev1 "k8s.io/api/core/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NewDeploymentReconciler returns an instance of DeploymentReconciler. +func NewDeploymentReconciler( + resourceKind string, + configManagerGetter clabernetesconfig.ManagerGetterFunc, +) *DeploymentReconciler { + return &DeploymentReconciler{ + resourceKind: resourceKind, + configManagerGetter: configManagerGetter, + } +} + +// DeploymentReconciler is a subcomponent of the "TopologyReconciler" but is exposed for testing +// purposes. This is the component responsible for rendering/validating deployments for a +// clabernetes topology resource. +type DeploymentReconciler struct { + resourceKind string + configManagerGetter clabernetesconfig.ManagerGetterFunc +} + +// Resolve accepts a mapping of clabernetes configs and a list of deployments that are -- by owner +// reference and/or labels -- associated with the topology. It returns a ResolvedDeployments object +// that contains the missing, extra, and current deployments for the topology. +func (r *DeploymentReconciler) Resolve( + ownedDeployments *k8sappsv1.DeploymentList, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, +) (*clabernetescontrollers.ResolvedDeployments, error) { + deployments := &clabernetescontrollers.ResolvedDeployments{ + Current: map[string]*k8sappsv1.Deployment{}, + } + + for i := range ownedDeployments.Items { + labels := ownedDeployments.Items[i].Labels + + if labels == nil { + return nil, fmt.Errorf( + "%w: labels are nil, but we expect to see topology owner label here", + claberneteserrors.ErrInvalidData, + ) + } + + nodeName, ok := labels[clabernetesconstants.LabelTopologyNode] + if !ok || nodeName == "" { + return nil, fmt.Errorf( + "%w: topology node label is missing or empty", + claberneteserrors.ErrInvalidData, + ) + } + + deployments.Current[nodeName] = &ownedDeployments.Items[i] + } + + allNodes := make([]string, len(clabernetesConfigs)) + + var nodeIdx int + + for nodeName := range clabernetesConfigs { + allNodes[nodeIdx] = nodeName + + nodeIdx++ + } + + deployments.Missing = clabernetesutil.StringSliceDifference( + deployments.CurrentDeploymentNames(), + allNodes, + ) + + extraEndpointDeployments := clabernetesutil.StringSliceDifference( + allNodes, + deployments.CurrentDeploymentNames(), + ) + + deployments.Extra = make([]*k8sappsv1.Deployment, len(extraEndpointDeployments)) + + for idx, endpoint := range extraEndpointDeployments { + deployments.Extra[idx] = deployments.Current[endpoint] + } + + return deployments, nil +} + +func (r *DeploymentReconciler) renderDeploymentBase( + name, + namespace, + owningTopologyName, + nodeName string, +) *k8sappsv1.Deployment { + annotations, globalLabels := r.configManagerGetter().GetAllMetadata() + + deploymentName := fmt.Sprintf("%s-%s", owningTopologyName, nodeName) + + selectorLabels := map[string]string{ + clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, + clabernetesconstants.LabelName: deploymentName, + clabernetesconstants.LabelTopologyOwner: owningTopologyName, + clabernetesconstants.LabelTopologyNode: nodeName, + } + + labels := make(map[string]string) + + for k, v := range selectorLabels { + labels[k] = v + } + + for k, v := range globalLabels { + labels[k] = v + } + + return &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + Labels: labels, + }, + Spec: k8sappsv1.DeploymentSpec{ + Replicas: clabernetesutil.ToPointer(int32(1)), + RevisionHistoryLimit: clabernetesutil.ToPointer(int32(0)), + Selector: 
&metav1.LabelSelector{ + MatchLabels: selectorLabels, + }, + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: annotations, + Labels: labels, + }, + Spec: k8scorev1.PodSpec{ + Containers: []k8scorev1.Container{}, + RestartPolicy: "Always", + ServiceAccountName: "default", + Volumes: []k8scorev1.Volume{}, + }, + }, + }, + } +} + +func (r *DeploymentReconciler) renderDeploymentVolumes( + deployment *k8sappsv1.Deployment, + nodeName, + configVolumeName, + owningTopologyName string, + owningTopologyCommonSpec *clabernetesapistopologyv1alpha1.TopologyCommonSpec, +) []k8scorev1.VolumeMount { + volumes := []k8scorev1.Volume{ + { + Name: configVolumeName, + VolumeSource: k8scorev1.VolumeSource{ + ConfigMap: &k8scorev1.ConfigMapVolumeSource{ + LocalObjectReference: k8scorev1.LocalObjectReference{ + Name: owningTopologyName, + }, + }, + }, + }, + } + + volumesFromConfigMaps := make([]clabernetesapistopologyv1alpha1.FileFromConfigMap, 0) + + volumeMountsFromCommonSpec := make([]k8scorev1.VolumeMount, 0) + + for _, fileFromConfigMap := range owningTopologyCommonSpec.FilesFromConfigMap { + if fileFromConfigMap.NodeName != nodeName { + continue + } + + volumesFromConfigMaps = append(volumesFromConfigMaps, fileFromConfigMap) + } + + for _, podVolume := range volumesFromConfigMaps { + if !clabernetescontrollers.VolumeAlreadyMounted( + podVolume.ConfigMapName, + deployment.Spec.Template.Spec.Volumes, + ) { + deployment.Spec.Template.Spec.Volumes = append( + deployment.Spec.Template.Spec.Volumes, + k8scorev1.Volume{ + Name: podVolume.ConfigMapName, + VolumeSource: k8scorev1.VolumeSource{ + ConfigMap: &k8scorev1.ConfigMapVolumeSource{ + LocalObjectReference: k8scorev1.LocalObjectReference{ + Name: podVolume.ConfigMapName, + }, + }, + }, + }, + ) + } + + volumeMount := k8scorev1.VolumeMount{ + Name: podVolume.ConfigMapName, + ReadOnly: false, + MountPath: fmt.Sprintf("/clabernetes/%s", podVolume.FilePath), + SubPath: podVolume.ConfigMapPath, + } + + volumeMountsFromCommonSpec = append( + volumeMountsFromCommonSpec, + volumeMount, + ) + } + + deployment.Spec.Template.Spec.Volumes = volumes + + return volumeMountsFromCommonSpec +} + +func (r *DeploymentReconciler) renderDeploymentContainer( + deployment *k8sappsv1.Deployment, + nodeName, + configVolumeName string, + volumeMountsFromCommonSpec []k8scorev1.VolumeMount, +) { + container := k8scorev1.Container{ + Name: nodeName, + WorkingDir: "/clabernetes", + Image: clabernetesutil.GetEnvStrOrDefault( + clabernetesconstants.LauncherImageEnv, + clabernetesconstants.LauncherDefaultImage, + ), + Command: []string{"/clabernetes/manager", "launch"}, + Ports: []k8scorev1.ContainerPort{ + { + Name: "vxlan", + ContainerPort: clabernetesconstants.VXLANServicePort, + Protocol: "UDP", + }, + }, + VolumeMounts: []k8scorev1.VolumeMount{ + { + Name: configVolumeName, + ReadOnly: true, + MountPath: "/clabernetes/topo.clab.yaml", + SubPath: nodeName, + }, + { + Name: configVolumeName, + ReadOnly: true, + MountPath: "/clabernetes/tunnels.yaml", + SubPath: fmt.Sprintf("%s-tunnels", nodeName), + }, + }, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: "File", + ImagePullPolicy: k8scorev1.PullPolicy( + clabernetesutil.GetEnvStrOrDefault( + clabernetesconstants.LauncherPullPolicyEnv, + "IfNotPresent", + ), + ), + SecurityContext: &k8scorev1.SecurityContext{ + // obviously we need privileged for dind setup + Privileged: clabernetesutil.ToPointer(true), + RunAsUser: clabernetesutil.ToPointer(int64(0)), + }, + } + + 
container.VolumeMounts = append(container.VolumeMounts, volumeMountsFromCommonSpec...)
+
+	deployment.Spec.Template.Spec.Containers = []k8scorev1.Container{container}
+}
+
+func (r *DeploymentReconciler) renderDeploymentContainerEnv(
+	deployment *k8sappsv1.Deployment,
+	owningTopologyCommonSpec *clabernetesapistopologyv1alpha1.TopologyCommonSpec,
+) {
+	launcherLogLevel := clabernetesutil.GetEnvStrOrDefault(
+		clabernetesconstants.LauncherLoggerLevelEnv,
+		clabernetesconstants.Info,
+	)
+
+	if owningTopologyCommonSpec.LauncherLogLevel != "" {
+		launcherLogLevel = owningTopologyCommonSpec.LauncherLogLevel
+	}
+
+	envs := []k8scorev1.EnvVar{
+		{
+			Name:  clabernetesconstants.LauncherLoggerLevelEnv,
+			Value: launcherLogLevel,
+		},
+	}
+
+	if owningTopologyCommonSpec.ContainerlabDebug {
+		envs = append(
+			envs,
+			k8scorev1.EnvVar{
+				Name:  clabernetesconstants.LauncherContainerlabDebug,
+				Value: clabernetesconstants.True,
+			},
+		)
+	}
+
+	if len(owningTopologyCommonSpec.InsecureRegistries) > 0 {
+		envs = append(
+			envs,
+			k8scorev1.EnvVar{
+				Name:  clabernetesconstants.LauncherInsecureRegistries,
+				Value: strings.Join(owningTopologyCommonSpec.InsecureRegistries, ","),
+			},
+		)
+	}
+
+	deployment.Spec.Template.Spec.Containers[0].Env = envs
+}
+
+func (r *DeploymentReconciler) renderDeploymentContainerResources(
+	deployment *k8sappsv1.Deployment,
+	nodeName string,
+	owningTopologyCommonSpec *clabernetesapistopologyv1alpha1.TopologyCommonSpec,
+	clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config,
+) {
+	nodeResources, nodeResourcesOk := owningTopologyCommonSpec.Resources[nodeName]
+	if nodeResourcesOk {
+		deployment.Spec.Template.Spec.Containers[0].Resources = nodeResources
+
+		return
+	}
+
+	defaultResources, defaultResourcesOk := owningTopologyCommonSpec.Resources[clabernetesconstants.Default] //nolint:lll
+	if defaultResourcesOk {
+		deployment.Spec.Template.Spec.Containers[0].Resources = defaultResources
+
+		return
+	}
+
+	resources := r.configManagerGetter().GetResourcesForContainerlabKind(
+		clabernetesConfigs[nodeName].Topology.GetNodeKindType(nodeName),
+	)
+
+	if resources != nil {
+		deployment.Spec.Template.Spec.Containers[0].Resources = *resources
+	}
+}
+
+// Render accepts an object (just for name/namespace reasons) a mapping of clabernetes
+// sub-topology configs and a node name and renders the final deployment for this node. 
+func (r *DeploymentReconciler) Render( + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + nodeName string, +) *k8sappsv1.Deployment { + owningTopologyName := owningTopology.GetName() + + owningTopologyCommonSpec := owningTopology.GetTopologyCommonSpec() + + configVolumeName := fmt.Sprintf("%s-config", owningTopologyName) + + deployment := r.renderDeploymentBase( + fmt.Sprintf("%s-%s", owningTopologyName, nodeName), + owningTopology.GetNamespace(), + owningTopologyName, + nodeName, + ) + + volumeMountsFromCommonSpec := r.renderDeploymentVolumes( + deployment, + nodeName, + configVolumeName, + owningTopologyName, + &owningTopologyCommonSpec, + ) + + r.renderDeploymentContainer( + deployment, + nodeName, + configVolumeName, + volumeMountsFromCommonSpec, + ) + + r.renderDeploymentContainerEnv( + deployment, + &owningTopologyCommonSpec, + ) + + r.renderDeploymentContainerResources( + deployment, + nodeName, + &owningTopologyCommonSpec, + clabernetesConfigs, + ) + + return deployment +} + +// RenderAll accepts an object (just for name/namespace reasons) a mapping of clabernetes +// sub-topology configs and a list of node names and renders the final deployment for the given +// nodes. +func (r *DeploymentReconciler) RenderAll( + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + nodeNames []string, +) []*k8sappsv1.Deployment { + deployments := make([]*k8sappsv1.Deployment, len(nodeNames)) + + for idx, nodeName := range nodeNames { + deployments[idx] = r.Render( + owningTopology, + clabernetesConfigs, + nodeName, + ) + } + + return deployments +} + +// Conforms checks if the existingDeployment conforms with the renderedDeployment. 
+func (r *DeploymentReconciler) Conforms( + existingDeployment, + renderedDeployment *k8sappsv1.Deployment, + expectedOwnerUID apimachinerytypes.UID, +) bool { + if !reflect.DeepEqual(existingDeployment.Spec.Replicas, renderedDeployment.Spec.Replicas) { + return false + } + + if !reflect.DeepEqual(existingDeployment.Spec.Selector, renderedDeployment.Spec.Selector) { + return false + } + + if !clabernetescontrollers.ContainersEqual( + existingDeployment.Spec.Template.Spec.Containers, + renderedDeployment.Spec.Template.Spec.Containers, + ) { + return false + } + + if !reflect.DeepEqual( + existingDeployment.Spec.Template.Spec.ServiceAccountName, + renderedDeployment.Spec.Template.Spec.ServiceAccountName, + ) { + return false + } + + if !reflect.DeepEqual( + existingDeployment.Spec.Template.Spec.RestartPolicy, + renderedDeployment.Spec.Template.Spec.RestartPolicy, + ) { + return false + } + + if !clabernetescontrollers.AnnotationsOrLabelsConform( + existingDeployment.ObjectMeta.Annotations, + renderedDeployment.ObjectMeta.Annotations, + ) { + return false + } + + if !clabernetescontrollers.AnnotationsOrLabelsConform( + existingDeployment.ObjectMeta.Labels, + renderedDeployment.ObjectMeta.Labels, + ) { + return false + } + + if !clabernetescontrollers.AnnotationsOrLabelsConform( + existingDeployment.Spec.Template.ObjectMeta.Annotations, + renderedDeployment.Spec.Template.ObjectMeta.Annotations, + ) { + return false + } + + if !clabernetescontrollers.AnnotationsOrLabelsConform( + existingDeployment.Spec.Template.ObjectMeta.Labels, + renderedDeployment.Spec.Template.ObjectMeta.Labels, + ) { + return false + } + + if len(existingDeployment.ObjectMeta.OwnerReferences) != 1 { + // we should have only one owner reference, the extractor + return false + } + + if existingDeployment.ObjectMeta.OwnerReferences[0].UID != expectedOwnerUID { + // owner ref uid is not us + return false + } + + return true +} + +// DetermineNodesNeedingRestart accepts a mapping of the previously stored clabernetes +// sub-topologies and the current reconcile loops rendered topologies and returns a slice of node +// names whose deployments need restarting due to configuration changes. 
+func (r *DeploymentReconciler) DetermineNodesNeedingRestart( + previousClabernetesConfigs, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, +) []string { + var nodesNeedingRestart []string + + for nodeName, nodeConfig := range currentClabernetesConfigs { + _, nodeExistedBefore := previousClabernetesConfigs[nodeName] + if !nodeExistedBefore { + continue + } + + if !reflect.DeepEqual(nodeConfig, previousClabernetesConfigs[nodeName]) { + nodesNeedingRestart = append( + nodesNeedingRestart, + nodeName, + ) + } + } + + return nodesNeedingRestart +} diff --git a/controllers/topology/deployment_test.go b/controllers/topology/reconciler_deployment_test.go similarity index 97% rename from controllers/topology/deployment_test.go rename to controllers/topology/reconciler_deployment_test.go index 1f739220..16d42e57 100644 --- a/controllers/topology/deployment_test.go +++ b/controllers/topology/reconciler_deployment_test.go @@ -6,6 +6,8 @@ import ( "reflect" "testing" + clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" + clabernetesconfig "github.com/srl-labs/clabernetes/config" clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" @@ -563,7 +565,12 @@ func TestDeploymentConforms(t *testing.T) { func(t *testing.T) { t.Logf("%s: starting", testCase.name) - actual := clabernetescontrollerstopology.DeploymentConforms( + reconciler := clabernetescontrollerstopology.NewDeploymentReconciler( + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + actual := reconciler.Conforms( testCase.existing, testCase.rendered, testCase.ownerUID, @@ -646,12 +653,12 @@ func TestRenderDeployment(t *testing.T) { func(t *testing.T) { t.Logf("%s: starting", testCase.name) - reconciler := clabernetescontrollerstopology.Reconciler{ - ResourceKind: "containerlab", - ConfigManagerGetter: clabernetesconfig.GetFakeManager, - } + reconciler := clabernetescontrollerstopology.NewDeploymentReconciler( + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) - got := reconciler.RenderDeployment( + got := reconciler.Render( testCase.obj, testCase.clabernetesConfigs, testCase.nodeName, From 680590b3d2c0706091e6e1ab565325def682792d Mon Sep 17 00:00:00 2001 From: Carl Montanari Date: Sun, 22 Oct 2023 10:58:50 -0700 Subject: [PATCH 5/8] fix: k8s type in topology reconciler logs --- constants/kubernetes.go | 12 ++++++++++ controllers/topology/reconciler.go | 27 ++++++++++++++++----- controllers/topology/reconciler_crud.go | 32 ++++++++++++++----------- 3 files changed, 51 insertions(+), 20 deletions(-) create mode 100644 constants/kubernetes.go diff --git a/constants/kubernetes.go b/constants/kubernetes.go new file mode 100644 index 00000000..56fb76c3 --- /dev/null +++ b/constants/kubernetes.go @@ -0,0 +1,12 @@ +package constants + +const ( + // KubernetesConfigMap is a const to use for "configmap". + KubernetesConfigMap = "configmap" + + // KubernetesService is a const to use for "service". + KubernetesService = "service" + + // KubernetesDeployment is a const to use for "deployment". 
+ KubernetesDeployment = "deployment" +) diff --git a/controllers/topology/reconciler.go b/controllers/topology/reconciler.go index 80b02870..b52c8740 100644 --- a/controllers/topology/reconciler.go +++ b/controllers/topology/reconciler.go @@ -102,7 +102,12 @@ func (r *Reconciler) ReconcileConfigMap( ) if err != nil { if apimachineryerrors.IsNotFound(err) { - return r.createObj(ctx, owningTopology, renderedConfigMap) + return r.createObj( + ctx, + owningTopology, + renderedConfigMap, + clabernetesconstants.KubernetesConfigMap, + ) } return err @@ -116,7 +121,7 @@ func (r *Reconciler) ReconcileConfigMap( return nil } - return r.updateObj(ctx, renderedConfigMap) + return r.updateObj(ctx, renderedConfigMap, clabernetesconstants.KubernetesConfigMap) } func (r *Reconciler) reconcileDeploymentsResolve( @@ -199,6 +204,7 @@ func (r *Reconciler) reconcileDeploymentsHandleRestarts( Namespace: owningTopology.GetNamespace(), Name: deploymentName, }, + clabernetesconstants.KubernetesDeployment, ) if err != nil { if apimachineryerrors.IsNotFound(err) { @@ -222,7 +228,7 @@ func (r *Reconciler) reconcileDeploymentsHandleRestarts( nodeDeployment.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = now //nolint:lll - err = r.updateObj(ctx, nodeDeployment) + err = r.updateObj(ctx, nodeDeployment, clabernetesconstants.KubernetesDeployment) if err != nil { return err } @@ -250,7 +256,7 @@ func (r *Reconciler) ReconcileDeployments( r.Log.Info("pruning extraneous deployments") for _, extraDeployment := range deployments.Extra { - err = r.deleteObj(ctx, extraDeployment) + err = r.deleteObj(ctx, extraDeployment, clabernetesconstants.KubernetesDeployment) if err != nil { return err } @@ -265,7 +271,12 @@ func (r *Reconciler) ReconcileDeployments( ) for _, renderedMissingDeployment := range renderedMissingDeployments { - err = r.createObj(ctx, owningTopology, renderedMissingDeployment) + err = r.createObj( + ctx, + owningTopology, + renderedMissingDeployment, + clabernetesconstants.KubernetesDeployment, + ) if err != nil { return err } @@ -294,7 +305,11 @@ func (r *Reconciler) ReconcileDeployments( renderedCurrentDeployment, owningTopology.GetUID(), ) { - err = r.updateObj(ctx, renderedCurrentDeployment) + err = r.updateObj( + ctx, + renderedCurrentDeployment, + clabernetesconstants.KubernetesDeployment, + ) if err != nil { return err } diff --git a/controllers/topology/reconciler_crud.go b/controllers/topology/reconciler_crud.go index 1bc33a05..9a40525a 100644 --- a/controllers/topology/reconciler_crud.go +++ b/controllers/topology/reconciler_crud.go @@ -11,27 +11,28 @@ import ( func (r *Reconciler) createObj( ctx context.Context, ownerObj, - renderedObj ctrlruntimeclient.Object, + createObj ctrlruntimeclient.Object, + createObjKind string, ) error { - err := ctrlruntimeutil.SetOwnerReference(ownerObj, renderedObj, r.Client.Scheme()) + err := ctrlruntimeutil.SetOwnerReference(ownerObj, createObj, r.Client.Scheme()) if err != nil { return err } r.Log.Debugf( "creating %s '%s/%s'", - renderedObj.GetObjectKind().GroupVersionKind().Kind, - renderedObj.GetNamespace(), - renderedObj.GetName(), + createObjKind, + createObj.GetNamespace(), + createObj.GetName(), ) - err = r.Client.Create(ctx, renderedObj) + err = r.Client.Create(ctx, createObj) if err != nil { r.Log.Criticalf( "failed creating %s '%s/%s' error: %s", - renderedObj.GetObjectKind().GroupVersionKind().Kind, - renderedObj.GetNamespace(), - renderedObj.GetName(), + createObjKind, + createObj.GetNamespace(), + createObj.GetName(), err, ) @@ 
-45,10 +46,11 @@ func (r *Reconciler) getObj( ctx context.Context, getObj ctrlruntimeclient.Object, namespacedName apimachinerytypes.NamespacedName, + getObjKind string, ) error { r.Log.Debugf( "getting %s '%s/%s'", - getObj.GetObjectKind().GroupVersionKind().Kind, + getObjKind, getObj.GetNamespace(), getObj.GetName(), ) @@ -59,10 +61,11 @@ func (r *Reconciler) getObj( func (r *Reconciler) updateObj( ctx context.Context, updateObj ctrlruntimeclient.Object, + updateObjKind string, ) error { r.Log.Debugf( "updating %s '%s/%s'", - updateObj.GetObjectKind().GroupVersionKind().Kind, + updateObjKind, updateObj.GetNamespace(), updateObj.GetName(), ) @@ -71,7 +74,7 @@ func (r *Reconciler) updateObj( if err != nil { r.Log.Criticalf( "failed updating %s '%s/%s' error: %s", - updateObj.GetObjectKind().GroupVersionKind().Kind, + updateObjKind, updateObj.GetNamespace(), updateObj.GetName(), err, @@ -86,10 +89,11 @@ func (r *Reconciler) updateObj( func (r *Reconciler) deleteObj( ctx context.Context, deleteObj ctrlruntimeclient.Object, + deleteObjKind string, ) error { r.Log.Debugf( "deleting %s '%s/%s'", - deleteObj.GetObjectKind().GroupVersionKind().Kind, + deleteObjKind, deleteObj.GetNamespace(), deleteObj.GetName(), ) @@ -98,7 +102,7 @@ func (r *Reconciler) deleteObj( if err != nil { r.Log.Criticalf( "failed deleting %s '%s/%s' error: %s", - deleteObj.GetObjectKind().GroupVersionKind().Kind, + deleteObjKind, deleteObj.GetNamespace(), deleteObj.GetName(), err, From 392f5da17c09ad087c8b2b5489944a0bf15ef9a9 Mon Sep 17 00:00:00 2001 From: Carl Montanari Date: Sun, 22 Oct 2023 14:05:39 -0700 Subject: [PATCH 6/8] refactor: giant refactor of topology reconciler, moved k8s utils to dedicated package --- .golangci.yaml | 4 + clabverter/files.go | 4 +- clicker/clabernetes.go | 6 +- constants/kubernetes.go | 6 + controllers/base.go | 13 +- controllers/deployments.go | 71 ---- controllers/services.go | 31 -- controllers/topology/containerlab/config.go | 2 +- .../topology/containerlab/controller.go | 6 +- controllers/topology/kne/config.go | 2 +- controllers/topology/kne/controller.go | 8 +- .../configmap.go} | 22 +- .../configmap_test.go} | 10 +- .../crud.go} | 2 +- .../deployment.go} | 65 ++-- .../deployment_test.go} | 13 +- .../topology/{ => reconciler}/reconciler.go | 290 ++++++++++---- .../topology/reconciler/reconciler_test.go | 14 + controllers/topology/reconciler/resolve.go | 66 ++++ .../topology/{ => reconciler}/service.go | 20 +- .../topology/{ => reconciler}/service_test.go | 7 +- .../topology/reconciler/serviceexpose.go | 286 ++++++++++++++ .../topology/reconciler/servicefabric.go | 197 ++++++++++ .../basic-two-node-no-links.json | 0 .../basic-two-node-with-links.json | 0 .../deployment/render-deployment/simple.json | 2 + .../allocate-tunnel-ids/meshy-links.json | 90 +++++ .../simple-already-allocated-ids.json | 22 ++ .../simple-existing-status.json | 22 ++ .../simple-weirdly-allocated-ids.json | 22 ++ .../tunnels/allocate-tunnel-ids/simple.json | 22 ++ controllers/topology/serviceexpose.go | 359 ------------------ controllers/topology/servicefabric.go | 267 ------------- .../10-service.containerlab-basic-srl1.yaml | 2 + logging/fake.go | 38 ++ manager/clabernetes.go | 4 +- manager/election/util.go | 6 +- testhelper/kubernetes.go | 4 +- util/kubernetes.go | 115 ------ util/kubernetes/containers.go | 33 ++ {controllers => util/kubernetes}/meta.go | 2 +- util/kubernetes/names.go | 34 ++ .../names_test.go} | 77 +--- util/kubernetes/namespace.go | 35 ++ util/kubernetes/objectdiffer.go | 55 +++ 
util/kubernetes/resources.go | 55 +++ util/kubernetes/resources_test.go | 75 ++++ util/kubernetes/volumes.go | 14 + 48 files changed, 1412 insertions(+), 1088 deletions(-) delete mode 100644 controllers/deployments.go delete mode 100644 controllers/services.go rename controllers/topology/{reconciler_configmap.go => reconciler/configmap.go} (87%) rename controllers/topology/{reconciler_configmap_test.go => reconciler/configmap_test.go} (94%) rename controllers/topology/{reconciler_crud.go => reconciler/crud.go} (99%) rename controllers/topology/{reconciler_deployment.go => reconciler/deployment.go} (89%) rename controllers/topology/{reconciler_deployment_test.go => reconciler/deployment_test.go} (97%) rename controllers/topology/{ => reconciler}/reconciler.go (61%) create mode 100644 controllers/topology/reconciler/reconciler_test.go create mode 100644 controllers/topology/reconciler/resolve.go rename controllers/topology/{ => reconciler}/service.go (72%) rename controllers/topology/{ => reconciler}/service_test.go (98%) create mode 100644 controllers/topology/reconciler/serviceexpose.go create mode 100644 controllers/topology/reconciler/servicefabric.go rename controllers/topology/{ => reconciler}/test-fixtures/golden/configmap/render-config-map/basic-two-node-no-links.json (100%) rename controllers/topology/{ => reconciler}/test-fixtures/golden/configmap/render-config-map/basic-two-node-with-links.json (100%) rename controllers/topology/{ => reconciler}/test-fixtures/golden/deployment/render-deployment/simple.json (96%) create mode 100644 controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/meshy-links.json create mode 100644 controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-already-allocated-ids.json create mode 100644 controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-existing-status.json create mode 100644 controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-weirdly-allocated-ids.json create mode 100644 controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple.json delete mode 100644 controllers/topology/serviceexpose.go delete mode 100644 controllers/topology/servicefabric.go create mode 100644 logging/fake.go delete mode 100644 util/kubernetes.go create mode 100644 util/kubernetes/containers.go rename {controllers => util/kubernetes}/meta.go (97%) create mode 100644 util/kubernetes/names.go rename util/{kubernetes_test.go => kubernetes/names_test.go} (50%) create mode 100644 util/kubernetes/namespace.go create mode 100644 util/kubernetes/objectdiffer.go create mode 100644 util/kubernetes/resources.go create mode 100644 util/kubernetes/resources_test.go create mode 100644 util/kubernetes/volumes.go diff --git a/.golangci.yaml b/.golangci.yaml index 325c037a..4f578352 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -145,6 +145,10 @@ issues: - funlen - gochecknoglobals + - path: logging/fake.go + linters: + - revive + # ignore globals for standard k8s things - linters: - gochecknoglobals diff --git a/clabverter/files.go b/clabverter/files.go index 8b81f757..e43bfa51 100644 --- a/clabverter/files.go +++ b/clabverter/files.go @@ -6,6 +6,8 @@ import ( "strings" "text/template" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + clabernetesutil "github.com/srl-labs/clabernetes/util" clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" ) @@ -262,7 +264,7 @@ func (c 
*Clabverter) handleExtraFiles() error { c.extraFilesConfigMaps[nodeName] = make([]topologyConfigMapTemplateVars, 0) for extraFilePath, extraFileContent := range nodeExtraFiles { - safeFileName := clabernetesutil.SafeConcatNameKubernetes( + safeFileName := clabernetesutilkubernetes.SafeConcatNameKubernetes( strings.Split(extraFilePath, "/")...) safeFileName = strings.TrimPrefix(safeFileName, "-") diff --git a/clicker/clabernetes.go b/clicker/clabernetes.go index 50244b7a..7155dfff 100644 --- a/clicker/clabernetes.go +++ b/clicker/clabernetes.go @@ -9,6 +9,8 @@ import ( "sync" "time" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + "gopkg.in/yaml.v3" claberneteserrors "github.com/srl-labs/clabernetes/errors" @@ -203,7 +205,7 @@ func (c *clabernetes) run() error { func (c *clabernetes) setup() error { var err error - c.namespace, err = clabernetesutil.CurrentNamespace() + c.namespace, err = clabernetesutilkubernetes.CurrentNamespace() if err != nil { c.logger.Criticalf("failed getting current namespace, err: %s", err) @@ -318,7 +320,7 @@ func envToResources() (k8scorev1.ResourceRequirements, error) { return out, nil } - parsedOut, err := clabernetesutil.YAMLToK8sResourceRequirements(asStr) + parsedOut, err := clabernetesutilkubernetes.YAMLToK8sResourceRequirements(asStr) if err != nil { return out, err } diff --git a/constants/kubernetes.go b/constants/kubernetes.go index 56fb76c3..2c6e255c 100644 --- a/constants/kubernetes.go +++ b/constants/kubernetes.go @@ -9,4 +9,10 @@ const ( // KubernetesDeployment is a const to use for "deployment". KubernetesDeployment = "deployment" + + // KubernetesServiceClusterIPType is a const to use for "ClusterIP". + KubernetesServiceClusterIPType = "ClusterIP" + + // KubernetesServiceLoadBalancerType is a const to use for "LoadBalancer". + KubernetesServiceLoadBalancerType = "LoadBalancer" ) diff --git a/controllers/base.go b/controllers/base.go index 57927524..4aa456a2 100644 --- a/controllers/base.go +++ b/controllers/base.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + ctrlruntimeevent "sigs.k8s.io/controller-runtime/pkg/event" ctrlruntimepredicate "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -58,7 +60,7 @@ func NewBaseController( return &BaseController{ Ctx: ctx, AppName: appName, - ControllerNamespace: clabernetesutil.MustCurrentNamespace(), + ControllerNamespace: clabernetesutilkubernetes.MustCurrentNamespace(), Log: logger, Config: config, Client: client, @@ -127,3 +129,12 @@ func (c *BaseController) LogReconcileCompleteObjectNotExist(_ ctrlruntime.Reques func (c *BaseController) LogReconcileFailedGettingObject(req ctrlruntime.Request, err error) { c.Log.Criticalf("failed fetching '%s/%s', error: %s", req.Namespace, req.Name, err) } + +// GetServiceDNSSuffix returns the default "svc.cluster.local" dns suffix, or the user's provided +// override value. 
+func (c *BaseController) GetServiceDNSSuffix() string { + return clabernetesutil.GetEnvStrOrDefault( + clabernetesconstants.InClusterDNSSuffixEnv, + clabernetesconstants.DefaultInClusterDNSSuffix, + ) +} diff --git a/controllers/deployments.go b/controllers/deployments.go deleted file mode 100644 index 0854a5f9..00000000 --- a/controllers/deployments.go +++ /dev/null @@ -1,71 +0,0 @@ -package controllers - -import ( - "reflect" - - k8sappsv1 "k8s.io/api/apps/v1" - k8scorev1 "k8s.io/api/core/v1" -) - -// ResolvedDeployments is an object that is used to track current and missing deployments for a -// controller such as Containerlab (topology). -type ResolvedDeployments struct { - // Current deployments by endpoint name - Current map[string]*k8sappsv1.Deployment - // Missing deployments by endpoint name - Missing []string - // Extra deployments that should be pruned - Extra []*k8sappsv1.Deployment -} - -// CurrentDeploymentNames returns a slice of the names of the current deployments. -func (r *ResolvedDeployments) CurrentDeploymentNames() []string { - names := make([]string, len(r.Current)) - - var idx int - - for k := range r.Current { - names[idx] = k - - idx++ - } - - return names -} - -// ContainersEqual returns true if the existing container slice matches the rendered container slice -// it ignores slice order. -func ContainersEqual(existing, rendered []k8scorev1.Container) bool { - if len(existing) != len(rendered) { - return false - } - - for existingIdx := range existing { - var matched bool - - for renderedIdx := range rendered { - if reflect.DeepEqual(existing[existingIdx], rendered[renderedIdx]) { - matched = true - - break - } - } - - if !matched { - return false - } - } - - return true -} - -// VolumeAlreadyMounted checks if the given volumeName is already in the existingVolumes. -func VolumeAlreadyMounted(volumeName string, existingVolumes []k8scorev1.Volume) bool { - for idx := range existingVolumes { - if volumeName == existingVolumes[idx].Name { - return true - } - } - - return false -} diff --git a/controllers/services.go b/controllers/services.go deleted file mode 100644 index 8c43f5f4..00000000 --- a/controllers/services.go +++ /dev/null @@ -1,31 +0,0 @@ -package controllers - -import ( - k8scorev1 "k8s.io/api/core/v1" -) - -// ResolvedServices is an object that is used to track current and missing services for a -// controller such as Containerlab (topology). -type ResolvedServices struct { - // Current deployments by endpoint name - Current map[string]*k8scorev1.Service - // Missing deployments by endpoint name - Missing []string - // Extra deployments that should be pruned - Extra []*k8scorev1.Service -} - -// CurrentServiceNames returns a slice of the names of the current services. 
-func (r *ResolvedServices) CurrentServiceNames() []string { - names := make([]string, len(r.Current)) - - var idx int - - for k := range r.Current { - names[idx] = k - - idx++ - } - - return names -} diff --git a/controllers/topology/containerlab/config.go b/controllers/topology/containerlab/config.go index d1043053..d7d4561e 100644 --- a/controllers/topology/containerlab/config.go +++ b/controllers/topology/containerlab/config.go @@ -403,7 +403,7 @@ func (c *Controller) processConfigForNode( clab.Name, uninterestingEndpoint.NodeName, clab.Namespace, - clabernetescontrollerstopology.GetServiceDNSSuffix(), + c.BaseController.GetServiceDNSSuffix(), ), LocalLinkName: interestingEndpoint.InterfaceName, RemoteLinkName: uninterestingEndpoint.InterfaceName, diff --git a/controllers/topology/containerlab/controller.go b/controllers/topology/containerlab/controller.go index 0622e7c8..b96e2d10 100644 --- a/controllers/topology/containerlab/controller.go +++ b/controllers/topology/containerlab/controller.go @@ -14,7 +14,7 @@ import ( clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" - clabernetescontrollerstopology "github.com/srl-labs/clabernetes/controllers/topology" + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" "k8s.io/client-go/rest" ctrlruntime "sigs.k8s.io/controller-runtime" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -41,7 +41,7 @@ func NewController( c := &Controller{ BaseController: baseController, - TopologyReconciler: clabernetescontrollerstopology.NewReconciler( + TopologyReconciler: clabernetescontrollerstopologyreconciler.NewReconciler( baseController.Log, baseController.Client, clabernetesapistopology.Containerlab, @@ -77,7 +77,7 @@ func NewController( // Controller is the Containerlab topology controller object. type Controller struct { *clabernetescontrollers.BaseController - TopologyReconciler *clabernetescontrollerstopology.Reconciler + TopologyReconciler *clabernetescontrollerstopologyreconciler.Reconciler } // SetupWithManager sets up the controller with the Manager. 
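For context on the GetServiceDNSSuffix helper relocated above: the call sites in the config processors pass its return value as the tail of the remote node's in-cluster service FQDN. The standalone sketch below shows that composition; the remoteServiceFQDN helper name and the "%s-%s.%s.%s" layout are illustrative assumptions, since the actual format string is not visible in these hunks.

// Sketch only: assumes a "<topology>-<node>.<namespace>.<suffix>" layout,
// which is not confirmed by the hunks above.
package main

import "fmt"

func remoteServiceFQDN(topology, node, namespace, dnsSuffix string) string {
	return fmt.Sprintf("%s-%s.%s.%s", topology, node, namespace, dnsSuffix)
}

func main() {
	// With the default suffix this prints "clab-demo-srl2.default.svc.cluster.local".
	fmt.Println(remoteServiceFQDN("clab-demo", "srl2", "default", "svc.cluster.local"))
}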
diff --git a/controllers/topology/kne/config.go b/controllers/topology/kne/config.go index 873e9d52..9c0f5bf7 100644 --- a/controllers/topology/kne/config.go +++ b/controllers/topology/kne/config.go @@ -160,7 +160,7 @@ func (c *Controller) processConfig( //nolint:funlen kne.Name, uninterestingEndpoint.NodeName, kne.Namespace, - clabernetescontrollerstopology.GetServiceDNSSuffix(), + c.BaseController.GetServiceDNSSuffix(), ), LocalLinkName: interestingEndpoint.InterfaceName, RemoteLinkName: uninterestingEndpoint.InterfaceName, diff --git a/controllers/topology/kne/controller.go b/controllers/topology/kne/controller.go index 5488e31d..5bcfd009 100644 --- a/controllers/topology/kne/controller.go +++ b/controllers/topology/kne/controller.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" + clabernetesconfig "github.com/srl-labs/clabernetes/config" ctrlruntimebuilder "sigs.k8s.io/controller-runtime/pkg/builder" @@ -11,8 +13,6 @@ import ( k8scorev1 "k8s.io/api/core/v1" ctrlruntimehandler "sigs.k8s.io/controller-runtime/pkg/handler" - clabernetescontrollerstopology "github.com/srl-labs/clabernetes/controllers/topology" - clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" @@ -43,7 +43,7 @@ func NewController( c := &Controller{ BaseController: baseController, - TopologyReconciler: clabernetescontrollerstopology.NewReconciler( + TopologyReconciler: clabernetescontrollerstopologyreconciler.NewReconciler( baseController.Log, baseController.Client, clabernetesapistopology.Kne, @@ -79,7 +79,7 @@ func NewController( // Controller is the Containerlab topology controller object. type Controller struct { *clabernetescontrollers.BaseController - TopologyReconciler *clabernetescontrollerstopology.Reconciler + TopologyReconciler *clabernetescontrollerstopologyreconciler.Reconciler } // SetupWithManager sets up the controller with the Manager. diff --git a/controllers/topology/reconciler_configmap.go b/controllers/topology/reconciler/configmap.go similarity index 87% rename from controllers/topology/reconciler_configmap.go rename to controllers/topology/reconciler/configmap.go index 6a3a1181..6bd6234c 100644 --- a/controllers/topology/reconciler_configmap.go +++ b/controllers/topology/reconciler/configmap.go @@ -1,13 +1,16 @@ -package topology +package reconciler import ( "fmt" "reflect" + claberneteslogging "github.com/srl-labs/clabernetes/logging" + + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" clabernetesconfig "github.com/srl-labs/clabernetes/config" clabernetesconstants "github.com/srl-labs/clabernetes/constants" - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" "gopkg.in/yaml.v3" k8scorev1 "k8s.io/api/core/v1" @@ -17,11 +20,13 @@ import ( // NewConfigMapReconciler returns an instance of ConfigMapReconciler. 
func NewConfigMapReconciler( - resourceKind string, + log claberneteslogging.Instance, + owningTopologyKind string, configManagerGetter clabernetesconfig.ManagerGetterFunc, ) *ConfigMapReconciler { return &ConfigMapReconciler{ - resourceKind: resourceKind, + log: log, + owningTopologyKind: owningTopologyKind, configManagerGetter: configManagerGetter, } } @@ -30,7 +35,8 @@ func NewConfigMapReconciler( // purposes. This is the component responsible for rendering/validating configmaps for a // clabernetes topology resource. type ConfigMapReconciler struct { - resourceKind string + log claberneteslogging.Instance + owningTopologyKind string configManagerGetter clabernetesconfig.ManagerGetterFunc } @@ -55,7 +61,7 @@ func (r *ConfigMapReconciler) Render( clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, clabernetesconstants.LabelName: namespacedName.Name, clabernetesconstants.LabelTopologyOwner: namespacedName.Name, - clabernetesconstants.LabelTopologyKind: r.resourceKind, + clabernetesconstants.LabelTopologyKind: r.owningTopologyKind, }, }, Data: map[string]string{}, @@ -101,14 +107,14 @@ func (r *ConfigMapReconciler) Conforms( return false } - if !clabernetescontrollers.AnnotationsOrLabelsConform( + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( existingConfigMap.ObjectMeta.Annotations, renderedConfigMap.ObjectMeta.Annotations, ) { return false } - if !clabernetescontrollers.AnnotationsOrLabelsConform( + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( existingConfigMap.ObjectMeta.Labels, renderedConfigMap.ObjectMeta.Labels, ) { diff --git a/controllers/topology/reconciler_configmap_test.go b/controllers/topology/reconciler/configmap_test.go similarity index 94% rename from controllers/topology/reconciler_configmap_test.go rename to controllers/topology/reconciler/configmap_test.go index 86e074f0..82eb6fbb 100644 --- a/controllers/topology/reconciler_configmap_test.go +++ b/controllers/topology/reconciler/configmap_test.go @@ -1,4 +1,4 @@ -package topology_test +package reconciler_test import ( "encoding/json" @@ -6,6 +6,10 @@ import ( "reflect" "testing" + claberneteslogging "github.com/srl-labs/clabernetes/logging" + + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" + clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" apimachinerytypes "k8s.io/apimachinery/pkg/types" @@ -14,7 +18,6 @@ import ( k8scorev1 "k8s.io/api/core/v1" clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" - clabernetescontrollerstopology "github.com/srl-labs/clabernetes/controllers/topology" clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" clabernetesutil "github.com/srl-labs/clabernetes/util" clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" @@ -182,7 +185,8 @@ func TestRenderConfigMap(t *testing.T) { func(t *testing.T) { t.Logf("%s: starting", testCase.name) - reconciler := clabernetescontrollerstopology.NewConfigMapReconciler( + reconciler := clabernetescontrollerstopologyreconciler.NewConfigMapReconciler( + &claberneteslogging.FakeInstance{}, clabernetesapistopology.Containerlab, clabernetesconfig.GetFakeManager, ) diff --git a/controllers/topology/reconciler_crud.go b/controllers/topology/reconciler/crud.go similarity index 99% rename from controllers/topology/reconciler_crud.go rename to controllers/topology/reconciler/crud.go index 9a40525a..869782f2 100644 --- a/controllers/topology/reconciler_crud.go +++ 
b/controllers/topology/reconciler/crud.go @@ -1,4 +1,4 @@ -package topology +package reconciler import ( "context" diff --git a/controllers/topology/reconciler_deployment.go b/controllers/topology/reconciler/deployment.go similarity index 89% rename from controllers/topology/reconciler_deployment.go rename to controllers/topology/reconciler/deployment.go index ae073c4d..71614218 100644 --- a/controllers/topology/reconciler_deployment.go +++ b/controllers/topology/reconciler/deployment.go @@ -1,16 +1,19 @@ -package topology +package reconciler import ( "fmt" "reflect" "strings" + claberneteslogging "github.com/srl-labs/clabernetes/logging" + + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + apimachinerytypes "k8s.io/apimachinery/pkg/types" clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" clabernetesconfig "github.com/srl-labs/clabernetes/config" clabernetesconstants "github.com/srl-labs/clabernetes/constants" - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" claberneteserrors "github.com/srl-labs/clabernetes/errors" clabernetesutil "github.com/srl-labs/clabernetes/util" clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" @@ -21,11 +24,13 @@ import ( // NewDeploymentReconciler returns an instance of DeploymentReconciler. func NewDeploymentReconciler( - resourceKind string, + log claberneteslogging.Instance, + owningTopologyKind string, configManagerGetter clabernetesconfig.ManagerGetterFunc, ) *DeploymentReconciler { return &DeploymentReconciler{ - resourceKind: resourceKind, + log: log, + owningTopologyKind: owningTopologyKind, configManagerGetter: configManagerGetter, } } @@ -34,18 +39,20 @@ func NewDeploymentReconciler( // purposes. This is the component responsible for rendering/validating deployments for a // clabernetes topology resource. type DeploymentReconciler struct { - resourceKind string + log claberneteslogging.Instance + owningTopologyKind string configManagerGetter clabernetesconfig.ManagerGetterFunc } // Resolve accepts a mapping of clabernetes configs and a list of deployments that are -- by owner -// reference and/or labels -- associated with the topology. It returns a ResolvedDeployments object +// reference and/or labels -- associated with the topology. It returns a ObjectDiffer object // that contains the missing, extra, and current deployments for the topology. 
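// Editor's note: ObjectDiffer is the generic replacement for the
// ResolvedDeployments/ResolvedServices types whose helpers are deleted at the
// top of this patch; it lives in util/kubernetes and is not shown in this diff.
// A sketch of its shape, inferred from the call sites in this file and assuming
// clabernetesutil.StringSliceDifference keeps the same argument order as the
// removed code (the unexported name helper is hypothetical):

type ObjectDiffer[T ctrlruntimeclient.Object] struct {
	// Current holds the owned objects that exist in the cluster, keyed by node name.
	Current map[string]T
	// Missing holds node names that should have an object but do not yet.
	Missing []string
	// Extra holds objects that exist in the cluster but are no longer wanted.
	Extra []T
}

// currentObjectNames returns the keys of Current, mirroring the
// CurrentDeploymentNames/CurrentServiceNames methods this patch removes.
func (d *ObjectDiffer[T]) currentObjectNames() []string {
	names := make([]string, 0, len(d.Current))

	for name := range d.Current {
		names = append(names, name)
	}

	return names
}

// SetMissing records every expected name that has no Current entry.
func (d *ObjectDiffer[T]) SetMissing(expectedNames []string) {
	d.Missing = clabernetesutil.StringSliceDifference(
		d.currentObjectNames(),
		expectedNames,
	)
}

// SetExtra records every Current object whose name is no longer expected.
func (d *ObjectDiffer[T]) SetExtra(expectedNames []string) {
	extraNames := clabernetesutil.StringSliceDifference(
		expectedNames,
		d.currentObjectNames(),
	)

	d.Extra = make([]T, 0, len(extraNames))

	for _, name := range extraNames {
		d.Extra = append(d.Extra, d.Current[name])
	}
}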
func (r *DeploymentReconciler) Resolve( ownedDeployments *k8sappsv1.DeploymentList, clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, -) (*clabernetescontrollers.ResolvedDeployments, error) { - deployments := &clabernetescontrollers.ResolvedDeployments{ + _ clabernetesapistopologyv1alpha1.TopologyCommonObject, +) (*clabernetesutilkubernetes.ObjectDiffer[*k8sappsv1.Deployment], error) { + deployments := &clabernetesutilkubernetes.ObjectDiffer[*k8sappsv1.Deployment]{ Current: map[string]*k8sappsv1.Deployment{}, } @@ -80,21 +87,8 @@ func (r *DeploymentReconciler) Resolve( nodeIdx++ } - deployments.Missing = clabernetesutil.StringSliceDifference( - deployments.CurrentDeploymentNames(), - allNodes, - ) - - extraEndpointDeployments := clabernetesutil.StringSliceDifference( - allNodes, - deployments.CurrentDeploymentNames(), - ) - - deployments.Extra = make([]*k8sappsv1.Deployment, len(extraEndpointDeployments)) - - for idx, endpoint := range extraEndpointDeployments { - deployments.Extra[idx] = deployments.Current[endpoint] - } + deployments.SetMissing(allNodes) + deployments.SetExtra(allNodes) return deployments, nil } @@ -116,7 +110,9 @@ func (r *DeploymentReconciler) renderDeploymentBase( clabernetesconstants.LabelTopologyNode: nodeName, } - labels := make(map[string]string) + labels := map[string]string{ + clabernetesconstants.LabelTopologyKind: r.owningTopologyKind, + } for k, v := range selectorLabels { labels[k] = v @@ -188,7 +184,7 @@ func (r *DeploymentReconciler) renderDeploymentVolumes( } for _, podVolume := range volumesFromConfigMaps { - if !clabernetescontrollers.VolumeAlreadyMounted( + if !clabernetesutilkubernetes.VolumeAlreadyMounted( podVolume.ConfigMapName, deployment.Spec.Template.Spec.Volumes, ) { @@ -352,8 +348,8 @@ func (r *DeploymentReconciler) renderDeploymentContainerResources( } } -// Render accepts an object (just for name/namespace reasons) a mapping of clabernetes -// sub-topology configs and a node name and renders the final deployment for this node. +// Render accepts the owning topology a mapping of clabernetes sub-topology configs and a node name +// and renders the final deployment for this node. func (r *DeploymentReconciler) Render( owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, @@ -402,9 +398,8 @@ func (r *DeploymentReconciler) Render( return deployment } -// RenderAll accepts an object (just for name/namespace reasons) a mapping of clabernetes -// sub-topology configs and a list of node names and renders the final deployment for the given -// nodes. +// RenderAll accepts the owning topology a mapping of clabernetes sub-topology configs and a +// list of node names and renders the final deployments for the given nodes. 
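// Editor's note: the Conforms logic below leans on helpers this patch relocates
// from the controllers package into util/kubernetes (AnnotationsOrLabelsConform,
// ContainersEqual, VolumeAlreadyMounted). Their bodies are not part of this
// diff; as a sketch of the subset-style check AnnotationsOrLabelsConform
// presumably performs -- extra keys on the existing object are tolerated,
// missing or changed rendered keys are not:

func AnnotationsOrLabelsConform(existing, rendered map[string]string) bool {
	for key, wantValue := range rendered {
		gotValue, ok := existing[key]
		if !ok || gotValue != wantValue {
			return false
		}
	}

	return true
}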
func (r *DeploymentReconciler) RenderAll( owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, @@ -437,7 +432,7 @@ func (r *DeploymentReconciler) Conforms( return false } - if !clabernetescontrollers.ContainersEqual( + if !clabernetesutilkubernetes.ContainersEqual( existingDeployment.Spec.Template.Spec.Containers, renderedDeployment.Spec.Template.Spec.Containers, ) { @@ -458,28 +453,28 @@ func (r *DeploymentReconciler) Conforms( return false } - if !clabernetescontrollers.AnnotationsOrLabelsConform( + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( existingDeployment.ObjectMeta.Annotations, renderedDeployment.ObjectMeta.Annotations, ) { return false } - if !clabernetescontrollers.AnnotationsOrLabelsConform( + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( existingDeployment.ObjectMeta.Labels, renderedDeployment.ObjectMeta.Labels, ) { return false } - if !clabernetescontrollers.AnnotationsOrLabelsConform( + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( existingDeployment.Spec.Template.ObjectMeta.Annotations, renderedDeployment.Spec.Template.ObjectMeta.Annotations, ) { return false } - if !clabernetescontrollers.AnnotationsOrLabelsConform( + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( existingDeployment.Spec.Template.ObjectMeta.Labels, renderedDeployment.Spec.Template.ObjectMeta.Labels, ) { diff --git a/controllers/topology/reconciler_deployment_test.go b/controllers/topology/reconciler/deployment_test.go similarity index 97% rename from controllers/topology/reconciler_deployment_test.go rename to controllers/topology/reconciler/deployment_test.go index 16d42e57..a37b7da9 100644 --- a/controllers/topology/reconciler_deployment_test.go +++ b/controllers/topology/reconciler/deployment_test.go @@ -1,4 +1,4 @@ -package topology_test +package reconciler_test import ( "encoding/json" @@ -6,6 +6,10 @@ import ( "reflect" "testing" + claberneteslogging "github.com/srl-labs/clabernetes/logging" + + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" + clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" clabernetesconfig "github.com/srl-labs/clabernetes/config" @@ -13,7 +17,6 @@ import ( clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - clabernetescontrollerstopology "github.com/srl-labs/clabernetes/controllers/topology" clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" clabernetesutil "github.com/srl-labs/clabernetes/util" k8sappsv1 "k8s.io/api/apps/v1" @@ -565,7 +568,8 @@ func TestDeploymentConforms(t *testing.T) { func(t *testing.T) { t.Logf("%s: starting", testCase.name) - reconciler := clabernetescontrollerstopology.NewDeploymentReconciler( + reconciler := clabernetescontrollerstopologyreconciler.NewDeploymentReconciler( + &claberneteslogging.FakeInstance{}, clabernetesapistopology.Containerlab, clabernetesconfig.GetFakeManager, ) @@ -653,7 +657,8 @@ func TestRenderDeployment(t *testing.T) { func(t *testing.T) { t.Logf("%s: starting", testCase.name) - reconciler := clabernetescontrollerstopology.NewDeploymentReconciler( + reconciler := clabernetescontrollerstopologyreconciler.NewDeploymentReconciler( + &claberneteslogging.FakeInstance{}, clabernetesapistopology.Containerlab, clabernetesconfig.GetFakeManager, ) diff --git a/controllers/topology/reconciler.go 
b/controllers/topology/reconciler/reconciler.go similarity index 61% rename from controllers/topology/reconciler.go rename to controllers/topology/reconciler/reconciler.go index b52c8740..ce0c0ea2 100644 --- a/controllers/topology/reconciler.go +++ b/controllers/topology/reconciler/reconciler.go @@ -1,4 +1,4 @@ -package topology +package reconciler import ( "context" @@ -6,7 +6,7 @@ import ( "slices" "time" - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" clabernetesconstants "github.com/srl-labs/clabernetes/constants" k8sappsv1 "k8s.io/api/apps/v1" @@ -38,19 +38,36 @@ type ResourceListerFunc func( func NewReconciler( log claberneteslogging.Instance, client ctrlruntimeclient.Client, - resourceKind string, + owningTopologyKind string, resourceLister ResourceListerFunc, configManagerGetter clabernetesconfig.ManagerGetterFunc, ) *Reconciler { return &Reconciler{ - Log: log, - Client: client, - ResourceKind: resourceKind, - ResourceLister: resourceLister, - ConfigManagerGetter: configManagerGetter, - - configMapReconciler: NewConfigMapReconciler(resourceKind, configManagerGetter), - deploymentReconciler: NewDeploymentReconciler(resourceKind, configManagerGetter), + Log: log, + Client: client, + ResourceKind: owningTopologyKind, + ResourceLister: resourceLister, + + configMapReconciler: NewConfigMapReconciler( + log, + owningTopologyKind, + configManagerGetter, + ), + deploymentReconciler: NewDeploymentReconciler( + log, + owningTopologyKind, + configManagerGetter, + ), + serviceFabricReconciler: NewServiceFabricReconciler( + log, + owningTopologyKind, + configManagerGetter, + ), + serviceExposeReconciler: NewServiceExposeReconciler( + log, + owningTopologyKind, + configManagerGetter, + ), } } @@ -64,11 +81,10 @@ type Reconciler struct { ResourceKind string ResourceLister ResourceListerFunc - // TODO this should be deleted once we make the sub reconcilers - ConfigManagerGetter func() clabernetesconfig.Manager - - configMapReconciler *ConfigMapReconciler - deploymentReconciler *DeploymentReconciler + configMapReconciler *ConfigMapReconciler + deploymentReconciler *DeploymentReconciler + serviceFabricReconciler *ServiceFabricReconciler + serviceExposeReconciler *ServiceExposeReconciler } // ReconcileConfigMap reconciles the primary configmap containing clabernetes configs and tunnel @@ -124,53 +140,12 @@ func (r *Reconciler) ReconcileConfigMap( return r.updateObj(ctx, renderedConfigMap, clabernetesconstants.KubernetesConfigMap) } -func (r *Reconciler) reconcileDeploymentsResolve( - ctx context.Context, - owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, - currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, -) (*clabernetescontrollers.ResolvedDeployments, error) { - ownedDeployments := &k8sappsv1.DeploymentList{} - - err := r.Client.List( - ctx, - ownedDeployments, - ctrlruntimeclient.InNamespace(owningTopology.GetNamespace()), - ctrlruntimeclient.MatchingLabels{ - clabernetesconstants.LabelTopologyOwner: owningTopology.GetName(), - }, - ) - if err != nil { - r.Log.Criticalf("failed fetching owned deployments, error: '%s'", err) - - return nil, err - } - - deployments, err := r.deploymentReconciler.Resolve(ownedDeployments, currentClabernetesConfigs) - if err != nil { - r.Log.Criticalf("failed resolving owned deployments, error: '%s'", err) - - return nil, err - } - - r.Log.Debugf( - "deployments are missing for the following nodes: %s", - 
deployments.Missing, - ) - - r.Log.Debugf( - "extraneous deployments exist for following nodes: %s", - deployments.Extra, - ) - - return deployments, nil -} - func (r *Reconciler) reconcileDeploymentsHandleRestarts( ctx context.Context, owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, previousClabernetesConfigs, currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - deployments *clabernetescontrollers.ResolvedDeployments, + deployments *clabernetesutilkubernetes.ObjectDiffer[*k8sappsv1.Deployment], ) error { r.Log.Info("determining nodes needing restart") @@ -244,10 +219,15 @@ func (r *Reconciler) ReconcileDeployments( previousClabernetesConfigs, currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, ) error { - deployments, err := r.reconcileDeploymentsResolve( + deployments, err := reconcileResolve( ctx, + r, + &k8sappsv1.Deployment{}, + &k8sappsv1.DeploymentList{}, + clabernetesconstants.KubernetesDeployment, owningTopology, currentClabernetesConfigs, + r.deploymentReconciler.Resolve, ) if err != nil { return err @@ -328,22 +308,88 @@ func (r *Reconciler) ReconcileDeployments( // ReconcileServiceFabric reconciles the service used for "fabric" (inter node) connectivity. func (r *Reconciler) ReconcileServiceFabric( ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, ) error { - services, err := r.resolveFabricServices(ctx, obj, clabernetesConfigs) + serviceTypeName := fmt.Sprintf("fabric %s", clabernetesconstants.KubernetesService) + + services, err := reconcileResolve( + ctx, + r, + &k8scorev1.Service{}, + &k8scorev1.ServiceList{}, + serviceTypeName, + owningTopology, + currentClabernetesConfigs, + r.serviceExposeReconciler.Resolve, + ) if err != nil { return err } - err = r.pruneFabricServices(ctx, services) - if err != nil { - return err + r.Log.Info("pruning extraneous fabric services") + + for _, extraService := range services.Extra { + err = r.deleteObj( + ctx, + extraService, + serviceTypeName, + ) + if err != nil { + return err + } } - err = r.enforceFabricServices(ctx, obj, services) - if err != nil { - return err + r.Log.Info("creating missing fabric services") + + renderedMissingServices := r.serviceFabricReconciler.RenderAll( + owningTopology, + services.Missing, + ) + + for _, renderedMissingService := range renderedMissingServices { + err = r.createObj( + ctx, + owningTopology, + renderedMissingService, + serviceTypeName, + ) + if err != nil { + return err + } + } + + r.Log.Info("enforcing desired state on fabric services") + + for existingCurrentServiceNodeName, existingCurrentService := range services.Current { + renderedCurrentService := r.serviceFabricReconciler.Render( + owningTopology, + existingCurrentServiceNodeName, + ) + + err = ctrlruntimeutil.SetOwnerReference( + owningTopology, + renderedCurrentService, + r.Client.Scheme(), + ) + if err != nil { + return err + } + + if !r.serviceFabricReconciler.Conforms( + existingCurrentService, + renderedCurrentService, + owningTopology.GetUID(), + ) { + err = r.updateObj( + ctx, + renderedCurrentService, + serviceTypeName, + ) + if err != nil { + return err + } + } } return nil @@ -352,45 +398,121 @@ func (r *Reconciler) ReconcileServiceFabric( // ReconcileServicesExpose reconciles the service(s) used for exposing nodes. 
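// Editor's note: reconcileResolve (added later in this patch in
// reconciler/resolve.go) replaces the per-resource resolve helpers; the calls
// above for deployments and fabric services (and the expose call just below)
// only supply the owned object type, its list type, a readable type name, and
// the matching sub-reconciler's Resolve func. Purely as an illustration of the
// pattern -- the sub-reconciler named here is hypothetical, not part of
// clabernetes -- wiring up another owned resource would look the same:

pvcs, err := reconcileResolve(
	ctx,
	r,
	&k8scorev1.PersistentVolumeClaim{},
	&k8scorev1.PersistentVolumeClaimList{},
	"pvc", // hypothetical readable type name used in log messages
	owningTopology,
	currentClabernetesConfigs,
	r.pvcReconciler.Resolve, // hypothetical sub-reconciler with a matching Resolve signature
)
// the returned *ObjectDiffer[*k8scorev1.PersistentVolumeClaim] would then drive
// the same prune/create/enforce flow used for services above.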
func (r *Reconciler) ReconcileServicesExpose( ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, ) (bool, error) { + serviceTypeName := fmt.Sprintf("expose %s", clabernetesconstants.KubernetesService) + var shouldUpdate bool - objTopologyStatus := obj.GetTopologyStatus() + owningTopologyStatus := owningTopology.GetTopologyStatus() - if objTopologyStatus.NodeExposedPorts == nil { - objTopologyStatus.NodeExposedPorts = map[string]*clabernetesapistopologyv1alpha1.ExposedPorts{} //nolint:lll + if owningTopologyStatus.NodeExposedPorts == nil { + owningTopologyStatus.NodeExposedPorts = map[string]*clabernetesapistopologyv1alpha1.ExposedPorts{} //nolint:lll shouldUpdate = true } - services, err := r.resolveExposeServices(ctx, obj, clabernetesConfigs) + services, err := reconcileResolve( + ctx, + r, + &k8scorev1.Service{}, + &k8scorev1.ServiceList{}, + serviceTypeName, + owningTopology, + currentClabernetesConfigs, + r.serviceExposeReconciler.Resolve, + ) if err != nil { return shouldUpdate, err } - err = r.pruneExposeServices(ctx, services) - if err != nil { - return shouldUpdate, err + r.Log.Info("pruning extraneous services") + + for _, extraDeployment := range services.Extra { + err = r.deleteObj( + ctx, + extraDeployment, + serviceTypeName, + ) + if err != nil { + return shouldUpdate, err + } } - err = r.enforceExposeServices(ctx, obj, &objTopologyStatus, clabernetesConfigs, services) - if err != nil { - return shouldUpdate, err + r.Log.Info("creating missing services") + + renderedMissingServices := r.serviceExposeReconciler.RenderAll( + owningTopology, + &owningTopologyStatus, + currentClabernetesConfigs, + services.Missing, + ) + + for _, renderedMissingService := range renderedMissingServices { + err = r.createObj( + ctx, + owningTopology, + renderedMissingService, + serviceTypeName, + ) + if err != nil { + return shouldUpdate, err + } + } + + for existingCurrentServiceNodeName, existingCurrentService := range services.Current { + renderedCurrentService := r.serviceExposeReconciler.Render( + owningTopology, + &owningTopologyStatus, + currentClabernetesConfigs, + existingCurrentServiceNodeName, + ) + + if len(existingCurrentService.Status.LoadBalancer.Ingress) == 1 { + // can/would this ever be more than 1? i dunno? 
+ address := existingCurrentService.Status.LoadBalancer.Ingress[0].IP + if address != "" { + owningTopologyStatus.NodeExposedPorts[existingCurrentServiceNodeName].LoadBalancerAddress = address //nolint:lll + } + } + + err = ctrlruntimeutil.SetOwnerReference( + owningTopology, + renderedCurrentService, + r.Client.Scheme(), + ) + if err != nil { + return shouldUpdate, err + } + + if !r.serviceExposeReconciler.Conforms( + existingCurrentService, + renderedCurrentService, + owningTopology.GetUID(), + ) { + err = r.updateObj( + ctx, + renderedCurrentService, + serviceTypeName, + ) + if err != nil { + return shouldUpdate, err + } + } } - nodeExposedPortsBytes, err := yaml.Marshal(objTopologyStatus.NodeExposedPorts) + nodeExposedPortsBytes, err := yaml.Marshal(owningTopologyStatus.NodeExposedPorts) if err != nil { return shouldUpdate, err } newNodeExposedPortsHash := clabernetesutil.HashBytes(nodeExposedPortsBytes) - if objTopologyStatus.NodeExposedPortsHash != newNodeExposedPortsHash { - objTopologyStatus.NodeExposedPortsHash = newNodeExposedPortsHash + if owningTopologyStatus.NodeExposedPortsHash != newNodeExposedPortsHash { + owningTopologyStatus.NodeExposedPortsHash = newNodeExposedPortsHash - obj.SetTopologyStatus(objTopologyStatus) + owningTopology.SetTopologyStatus(owningTopologyStatus) shouldUpdate = true } diff --git a/controllers/topology/reconciler/reconciler_test.go b/controllers/topology/reconciler/reconciler_test.go new file mode 100644 index 00000000..4020b9ec --- /dev/null +++ b/controllers/topology/reconciler/reconciler_test.go @@ -0,0 +1,14 @@ +package reconciler_test + +import ( + "os" + "testing" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" +) + +func TestMain(m *testing.M) { + clabernetestesthelper.Flags() + + os.Exit(m.Run()) +} diff --git a/controllers/topology/reconciler/resolve.go b/controllers/topology/reconciler/resolve.go new file mode 100644 index 00000000..13fda9a4 --- /dev/null +++ b/controllers/topology/reconciler/resolve.go @@ -0,0 +1,66 @@ +package reconciler + +import ( + "context" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +func reconcileResolve[T ctrlruntimeclient.Object, TL ctrlruntimeclient.ObjectList]( + ctx context.Context, + reconciler *Reconciler, + ownedType T, + ownedTypeListing TL, + ownedTypeName string, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + resolveFunc func( + ownedObject TL, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + ) (*clabernetesutilkubernetes.ObjectDiffer[T], error), +) (*clabernetesutilkubernetes.ObjectDiffer[T], error) { + // strictly passed for typing reasons + _ = ownedType + + err := reconciler.Client.List( + ctx, + ownedTypeListing, + ctrlruntimeclient.InNamespace(owningTopology.GetNamespace()), + ctrlruntimeclient.MatchingLabels{ + clabernetesconstants.LabelTopologyOwner: owningTopology.GetName(), + }, + ) + if err != nil { + reconciler.Log.Criticalf("failed fetching owned deployments, error: '%s'", err) + + return nil, err + } + + resolved, err := 
resolveFunc(ownedTypeListing, currentClabernetesConfigs, owningTopology) + if err != nil { + reconciler.Log.Criticalf("failed resolving owned deployments, error: '%s'", err) + + return nil, err + } + + reconciler.Log.Debugf( + "%ss are missing for the following nodes: %s", + ownedTypeName, + resolved.Missing, + ) + + reconciler.Log.Debugf( + "extraneous %ss exist for following nodes: %s", + ownedTypeName, + resolved.Extra, + ) + + return resolved, nil +} diff --git a/controllers/topology/service.go b/controllers/topology/reconciler/service.go similarity index 72% rename from controllers/topology/service.go rename to controllers/topology/reconciler/service.go index 1dec01e9..1de48e11 100644 --- a/controllers/topology/service.go +++ b/controllers/topology/reconciler/service.go @@ -1,12 +1,9 @@ -package topology +package reconciler import ( "reflect" - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" - - clabernetesconstants "github.com/srl-labs/clabernetes/constants" - clabernetesutil "github.com/srl-labs/clabernetes/util" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" k8scorev1 "k8s.io/api/core/v1" apimachinerytypes "k8s.io/apimachinery/pkg/types" @@ -55,14 +52,14 @@ func ServiceConforms( } } - if !clabernetescontrollers.AnnotationsOrLabelsConform( + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( existingService.ObjectMeta.Annotations, renderedService.ObjectMeta.Annotations, ) { return false } - if !clabernetescontrollers.AnnotationsOrLabelsConform( + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( existingService.ObjectMeta.Labels, renderedService.ObjectMeta.Labels, ) { @@ -81,12 +78,3 @@ func ServiceConforms( return true } - -// GetServiceDNSSuffix returns the default "svc.cluster.local" dns suffix, or the user's provided -// override value. 
-func GetServiceDNSSuffix() string { - return clabernetesutil.GetEnvStrOrDefault( - clabernetesconstants.InClusterDNSSuffixEnv, - clabernetesconstants.DefaultInClusterDNSSuffix, - ) -} diff --git a/controllers/topology/service_test.go b/controllers/topology/reconciler/service_test.go similarity index 98% rename from controllers/topology/service_test.go rename to controllers/topology/reconciler/service_test.go index 77b57d00..53d3680b 100644 --- a/controllers/topology/service_test.go +++ b/controllers/topology/reconciler/service_test.go @@ -1,11 +1,12 @@ -package topology_test +package reconciler_test import ( "testing" + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" + "k8s.io/apimachinery/pkg/util/intstr" - clabernetescontrollerstopology "github.com/srl-labs/clabernetes/controllers/topology" clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -455,7 +456,7 @@ func TestServiceConforms(t *testing.T) { func(t *testing.T) { t.Logf("%s: starting", testCase.name) - actual := clabernetescontrollerstopology.ServiceConforms( + actual := clabernetescontrollerstopologyreconciler.ServiceConforms( testCase.existing, testCase.rendered, testCase.ownerUID, diff --git a/controllers/topology/reconciler/serviceexpose.go b/controllers/topology/reconciler/serviceexpose.go new file mode 100644 index 00000000..e73b5029 --- /dev/null +++ b/controllers/topology/reconciler/serviceexpose.go @@ -0,0 +1,286 @@ +package reconciler + +import ( + "fmt" + "sort" + "strings" + + claberneteslogging "github.com/srl-labs/clabernetes/logging" + + clabernetesutil "github.com/srl-labs/clabernetes/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + claberneteserrors "github.com/srl-labs/clabernetes/errors" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + k8scorev1 "k8s.io/api/core/v1" + apimachinerytypes "k8s.io/apimachinery/pkg/types" +) + +// NewServiceExposeReconciler returns an instance of ServiceExposeReconciler. +func NewServiceExposeReconciler( + log claberneteslogging.Instance, + owningTopologyKind string, + configManagerGetter clabernetesconfig.ManagerGetterFunc, +) *ServiceExposeReconciler { + return &ServiceExposeReconciler{ + log: log, + owningTopologyKind: owningTopologyKind, + configManagerGetter: configManagerGetter, + } +} + +// ServiceExposeReconciler is a subcomponent of the "TopologyReconciler" but is exposed for testing +// purposes. This is the component responsible for rendering/validating the "expose" service for a +// clabernetes topology resource. +type ServiceExposeReconciler struct { + log claberneteslogging.Instance + owningTopologyKind string + configManagerGetter clabernetesconfig.ManagerGetterFunc +} + +// Resolve accepts a mapping of clabernetes configs and a list of services that are -- by owner +// reference and/or labels -- associated with the topology. It returns a ObjectDiffer object +// that contains the missing, extra, and current services for the topology. 
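// Editor's note: the expose resolver defined below skips a node only when auto
// expose is disabled AND the node defines no ports AND the topology defines no
// default ports; every other node gets an expose service. Pulled out on its own
// (illustrative only, not a function in this patch), the rule reads:

func nodeIsExposed(disableAutoExpose bool, nodePorts, defaultPorts []string) bool {
	if !disableAutoExpose {
		// auto expose enabled: every node is exposed.
		return true
	}

	// auto expose disabled: only nodes with explicit ports, or with topology
	// wide default ports, are exposed.
	return len(nodePorts) > 0 || len(defaultPorts) > 0
}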
+func (r *ServiceExposeReconciler) Resolve( + ownedServices *k8scorev1.ServiceList, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, +) (*clabernetesutilkubernetes.ObjectDiffer[*k8scorev1.Service], error) { + services := &clabernetesutilkubernetes.ObjectDiffer[*k8scorev1.Service]{ + Current: map[string]*k8scorev1.Service{}, + } + + for i := range ownedServices.Items { + labels := ownedServices.Items[i].Labels + + if labels == nil { + return nil, fmt.Errorf( + "%w: labels are nil, but we expect to see topology owner label here", + claberneteserrors.ErrInvalidData, + ) + } + + topologyServiceType := labels[clabernetesconstants.LabelTopologyServiceType] + + if topologyServiceType != clabernetesconstants.TopologyServiceTypeExpose { + // not the kind of service we're looking for here, we only care about the services + // used for exposing nodes here. + continue + } + + nodeName, ok := labels[clabernetesconstants.LabelTopologyNode] + if !ok || nodeName == "" { + return nil, fmt.Errorf( + "%w: topology node label is missing or empty", + claberneteserrors.ErrInvalidData, + ) + } + + services.Current[nodeName] = &ownedServices.Items[i] + } + + exposedNodes := make([]string, 0) + + disableAutoExpose := owningTopology.GetTopologyCommonSpec().DisableAutoExpose + + for nodeName, nodeData := range clabernetesConfigs { + // if disable auto expose is true *and* there are no ports defined for the node *and* + // there are no default ports defined for the topology we can skip the node from an expose + // perspective. + if disableAutoExpose && + len(nodeData.Topology.Nodes[nodeName].Ports) == 0 && + len(nodeData.Topology.Defaults.Ports) == 0 { + continue + } + + exposedNodes = append(exposedNodes, nodeName) + } + + services.SetMissing(exposedNodes) + services.SetExtra(exposedNodes) + + return services, nil +} + +func (r *ServiceExposeReconciler) renderServiceBase( + name, + namespace, + owningTopologyName, + nodeName string, +) *k8scorev1.Service { + annotations, globalLabels := r.configManagerGetter().GetAllMetadata() + + deploymentName := fmt.Sprintf("%s-%s", owningTopologyName, nodeName) + + selectorLabels := map[string]string{ + clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, + clabernetesconstants.LabelName: deploymentName, + clabernetesconstants.LabelTopologyOwner: owningTopologyName, + clabernetesconstants.LabelTopologyNode: nodeName, + } + + labels := map[string]string{ + clabernetesconstants.LabelTopologyKind: r.owningTopologyKind, + clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeExpose, //nolint:lll + + } + + for k, v := range selectorLabels { + labels[k] = v + } + + for k, v := range globalLabels { + labels[k] = v + } + + return &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + Labels: labels, + }, + Spec: k8scorev1.ServiceSpec{ + Selector: selectorLabels, + Type: clabernetesconstants.KubernetesServiceLoadBalancerType, + }, + } +} + +func (r *ServiceExposeReconciler) parseContainerlabTopologyPortsSection( + portDefinition string, +) (bool, *k8scorev1.ServicePort) { + typedPort, err := clabernetesutilcontainerlab.ProcessPortDefinition(portDefinition) + if err != nil { + r.log.Warnf("skipping port due to the following error: %s", err) + + return true, nil + } + + return false, &k8scorev1.ServicePort{ + Name: fmt.Sprintf( + "port-%d-%s", typedPort.DestinationPort, 
strings.ToLower(typedPort.Protocol), + ), + Protocol: k8scorev1.Protocol(typedPort.Protocol), + Port: int32(typedPort.DestinationPort), + TargetPort: intstr.IntOrString{ + IntVal: int32(typedPort.ExposePort), + }, + } +} + +func (r *ServiceExposeReconciler) renderServicePorts( + service *k8scorev1.Service, + owningTopologyStatus *clabernetesapistopologyv1alpha1.TopologyStatus, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + nodeName string, +) { + owningTopologyStatus.NodeExposedPorts[nodeName] = &clabernetesapistopologyv1alpha1.ExposedPorts{ + TCPPorts: make([]int, 0), + UDPPorts: make([]int, 0), + } + + ports := make([]k8scorev1.ServicePort, 0) + + // for actual containerlab configs we copy the users given defaults into each "sub topology" -- + // so in the case of containerlab we want to make sure we also iterate over the "default" or + // topology wide ports that were specified. in this process we dont want to duplicate things, so + // we use a simple set implementation to make sure we aren't doubling up on any port + // definitions. + allContainerlabPorts := clabernetesutil.NewStringSet() + + allContainerlabPorts.Extend(clabernetesConfigs[nodeName].Topology.Nodes[nodeName].Ports) + + allContainerlabPorts.Extend(clabernetesConfigs[nodeName].Topology.Defaults.Ports) + + allContainerlabPortsItems := allContainerlabPorts.Items() + sort.Strings(allContainerlabPortsItems) + + for _, portDefinition := range allContainerlabPortsItems { + shouldSkip, port := r.parseContainerlabTopologyPortsSection(portDefinition) + + if shouldSkip { + continue + } + + ports = append(ports, *port) + + // dont forget to update the exposed ports status bits + if port.Protocol == clabernetesconstants.TCP { + owningTopologyStatus.NodeExposedPorts[nodeName].TCPPorts = append( + owningTopologyStatus.NodeExposedPorts[nodeName].TCPPorts, + int(port.Port), + ) + } else { + owningTopologyStatus.NodeExposedPorts[nodeName].UDPPorts = append( + owningTopologyStatus.NodeExposedPorts[nodeName].UDPPorts, + int(port.Port), + ) + } + } + + service.Spec.Ports = ports +} + +// Render accepts the owning topology a mapping of clabernetes sub-topology configs and a node name +// and renders the final expose service for this node. +func (r *ServiceExposeReconciler) Render( + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + owningTopologyStatus *clabernetesapistopologyv1alpha1.TopologyStatus, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + nodeName string, +) *k8scorev1.Service { + owningTopologyName := owningTopology.GetName() + + service := r.renderServiceBase( + fmt.Sprintf("%s-%s", owningTopologyName, nodeName), + owningTopology.GetNamespace(), + owningTopologyName, + nodeName, + ) + + r.renderServicePorts( + service, + owningTopologyStatus, + clabernetesConfigs, + nodeName, + ) + + return service +} + +// RenderAll accepts the owning topology a mapping of clabernetes sub-topology configs and a +// list of node names and renders the final expose services for the given nodes. 
+func (r *ServiceExposeReconciler) RenderAll( + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + owningTopologyStatus *clabernetesapistopologyv1alpha1.TopologyStatus, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + nodeNames []string, +) []*k8scorev1.Service { + services := make([]*k8scorev1.Service, len(nodeNames)) + + for idx, nodeName := range nodeNames { + services[idx] = r.Render( + owningTopology, + owningTopologyStatus, + clabernetesConfigs, + nodeName, + ) + } + + return services +} + +// Conforms checks if the existingService conforms with the renderedService. +func (r *ServiceExposeReconciler) Conforms( + existingService, + renderedService *k8scorev1.Service, + expectedOwnerUID apimachinerytypes.UID, +) bool { + return ServiceConforms(existingService, renderedService, expectedOwnerUID) +} diff --git a/controllers/topology/reconciler/servicefabric.go b/controllers/topology/reconciler/servicefabric.go new file mode 100644 index 00000000..94bbdc3f --- /dev/null +++ b/controllers/topology/reconciler/servicefabric.go @@ -0,0 +1,197 @@ +package reconciler + +import ( + "fmt" + + claberneteslogging "github.com/srl-labs/clabernetes/logging" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + claberneteserrors "github.com/srl-labs/clabernetes/errors" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + k8scorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apimachinerytypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// NewServiceFabricReconciler returns an instance of ServiceFabricReconciler. +func NewServiceFabricReconciler( + log claberneteslogging.Instance, + owningTopologyKind string, + configManagerGetter clabernetesconfig.ManagerGetterFunc, +) *ServiceFabricReconciler { + return &ServiceFabricReconciler{ + log: log, + owningTopologyKind: owningTopologyKind, + configManagerGetter: configManagerGetter, + } +} + +// ServiceFabricReconciler is a subcomponent of the "TopologyReconciler" but is exposed for testing +// purposes. This is the component responsible for rendering/validating the "fabric" service for a +// clabernetes topology resource. +type ServiceFabricReconciler struct { + log claberneteslogging.Instance + owningTopologyKind string + configManagerGetter clabernetesconfig.ManagerGetterFunc +} + +// Resolve accepts a mapping of clabernetes configs and a list of services that are -- by owner +// reference and/or labels -- associated with the topology. It returns a ObjectDiffer object +// that contains the missing, extra, and current services for the topology. 
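// Editor's note: the fabric Resolve defined just below is not covered by a test
// in this patch. A minimal sketch of one (hypothetical test, reusing the import
// aliases from the other _test files in this diff, and assuming the ObjectDiffer
// reports expected-but-absent node names in Missing) could be:

func TestServiceFabricReconcilerResolveSketch(t *testing.T) {
	reconciler := clabernetescontrollerstopologyreconciler.NewServiceFabricReconciler(
		&claberneteslogging.FakeInstance{},
		clabernetesapistopology.Containerlab,
		clabernetesconfig.GetFakeManager,
	)

	// one fabric service already exists for srl1...
	ownedServices := &k8scorev1.ServiceList{
		Items: []k8scorev1.Service{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name: "topo-1-srl1-vx",
					Labels: map[string]string{
						clabernetesconstants.LabelTopologyNode:        "srl1",
						clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeFabric,
					},
				},
			},
		},
	}

	// ...while the topology defines srl1 and srl2 (values unused by Resolve,
	// it only ranges over the keys).
	clabernetesConfigs := map[string]*clabernetesutilcontainerlab.Config{
		"srl1": nil,
		"srl2": nil,
	}

	diffed, err := reconciler.Resolve(ownedServices, clabernetesConfigs, nil)
	if err != nil {
		t.Fatal(err)
	}

	// srl2 has no fabric service yet; srl1's existing service is neither
	// missing nor extra.
	if len(diffed.Missing) != 1 || diffed.Missing[0] != "srl2" {
		t.Fatalf("unexpected missing fabric services: %v", diffed.Missing)
	}

	if len(diffed.Extra) != 0 {
		t.Fatalf("unexpected extra fabric services: %v", diffed.Extra)
	}
}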
+func (r *ServiceFabricReconciler) Resolve( + ownedServices *k8scorev1.ServiceList, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + _ clabernetesapistopologyv1alpha1.TopologyCommonObject, +) (*clabernetesutilkubernetes.ObjectDiffer[*k8scorev1.Service], error) { + services := &clabernetesutilkubernetes.ObjectDiffer[*k8scorev1.Service]{ + Current: map[string]*k8scorev1.Service{}, + } + + for i := range ownedServices.Items { + labels := ownedServices.Items[i].Labels + + if labels == nil { + return nil, fmt.Errorf( + "%w: labels are nil, but we expect to see topology owner label here", + claberneteserrors.ErrInvalidData, + ) + } + + topologyServiceType := labels[clabernetesconstants.LabelTopologyServiceType] + + if topologyServiceType != clabernetesconstants.TopologyServiceTypeFabric { + // not the kind of service we're looking for here, we only care about the services + // used for connecting the nodes together here. + continue + } + + nodeName, ok := labels[clabernetesconstants.LabelTopologyNode] + if !ok || nodeName == "" { + return nil, fmt.Errorf( + "%w: topology node label is missing or empty", + claberneteserrors.ErrInvalidData, + ) + } + + services.Current[nodeName] = &ownedServices.Items[i] + } + + allNodes := make([]string, len(clabernetesConfigs)) + + var nodeIdx int + + for nodeName := range clabernetesConfigs { + allNodes[nodeIdx] = nodeName + + nodeIdx++ + } + + services.SetMissing(allNodes) + services.SetExtra(allNodes) + + return services, nil +} + +func (r *ServiceFabricReconciler) renderServiceBase( + name, + namespace, + owningTopologyName, + nodeName string, +) *k8scorev1.Service { + annotations, globalLabels := r.configManagerGetter().GetAllMetadata() + + deploymentName := fmt.Sprintf("%s-%s", owningTopologyName, nodeName) + + selectorLabels := map[string]string{ + clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, + clabernetesconstants.LabelName: deploymentName, + clabernetesconstants.LabelTopologyOwner: owningTopologyName, + clabernetesconstants.LabelTopologyNode: nodeName, + } + + labels := map[string]string{ + clabernetesconstants.LabelTopologyKind: r.owningTopologyKind, + clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeFabric, //nolint:lll + + } + + for k, v := range selectorLabels { + labels[k] = v + } + + for k, v := range globalLabels { + labels[k] = v + } + + return &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + Labels: labels, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "vxlan", + Protocol: clabernetesconstants.UDP, + Port: clabernetesconstants.VXLANServicePort, + TargetPort: intstr.IntOrString{ + IntVal: clabernetesconstants.VXLANServicePort, + }, + }, + }, + Selector: selectorLabels, + Type: clabernetesconstants.KubernetesServiceClusterIPType, + }, + } +} + +// Render accepts the owning topology a mapping of clabernetes sub-topology configs and a node name +// and renders the final fabric service for this node. 
+func (r *ServiceFabricReconciler) Render( + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + nodeName string, +) *k8scorev1.Service { + owningTopologyName := owningTopology.GetName() + + service := r.renderServiceBase( + fmt.Sprintf("%s-%s-vx", owningTopologyName, nodeName), + owningTopology.GetNamespace(), + owningTopologyName, + nodeName, + ) + + return service +} + +// RenderAll accepts the owning topology a mapping of clabernetes sub-topology configs and a +// list of node names and renders the final fabric services for the given nodes. +func (r *ServiceFabricReconciler) RenderAll( + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + nodeNames []string, +) []*k8scorev1.Service { + services := make([]*k8scorev1.Service, len(nodeNames)) + + for idx, nodeName := range nodeNames { + services[idx] = r.Render( + owningTopology, + nodeName, + ) + } + + return services +} + +// Conforms checks if the existingService conforms with the renderedService. +func (r *ServiceFabricReconciler) Conforms( + existingService, + renderedService *k8scorev1.Service, + expectedOwnerUID apimachinerytypes.UID, +) bool { + return ServiceConforms(existingService, renderedService, expectedOwnerUID) +} diff --git a/controllers/topology/test-fixtures/golden/configmap/render-config-map/basic-two-node-no-links.json b/controllers/topology/reconciler/test-fixtures/golden/configmap/render-config-map/basic-two-node-no-links.json similarity index 100% rename from controllers/topology/test-fixtures/golden/configmap/render-config-map/basic-two-node-no-links.json rename to controllers/topology/reconciler/test-fixtures/golden/configmap/render-config-map/basic-two-node-no-links.json diff --git a/controllers/topology/test-fixtures/golden/configmap/render-config-map/basic-two-node-with-links.json b/controllers/topology/reconciler/test-fixtures/golden/configmap/render-config-map/basic-two-node-with-links.json similarity index 100% rename from controllers/topology/test-fixtures/golden/configmap/render-config-map/basic-two-node-with-links.json rename to controllers/topology/reconciler/test-fixtures/golden/configmap/render-config-map/basic-two-node-with-links.json diff --git a/controllers/topology/test-fixtures/golden/deployment/render-deployment/simple.json b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/simple.json similarity index 96% rename from controllers/topology/test-fixtures/golden/deployment/render-deployment/simple.json rename to controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/simple.json index 0a3cb541..f8f9ccb1 100755 --- a/controllers/topology/test-fixtures/golden/deployment/render-deployment/simple.json +++ b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/simple.json @@ -6,6 +6,7 @@ "labels": { "clabernetes/app": "clabernetes", "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", "clabernetes/topologyNode": "srl1", "clabernetes/topologyOwner": "render-deployment-test" } @@ -26,6 +27,7 @@ "labels": { "clabernetes/app": "clabernetes", "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", "clabernetes/topologyNode": "srl1", "clabernetes/topologyOwner": "render-deployment-test" } diff --git a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/meshy-links.json b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/meshy-links.json new 
file mode 100644 index 00000000..7977c5be --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/meshy-links.json @@ -0,0 +1,90 @@ +{ + "srl1": [ + { + "id": 1, + "localNodeName": "srl1", + "remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", + "remoteNodeName": "srl2", + "localLinkName": "e1-1", + "remoteLinkName": "e1-1" + }, + { + "id": 2, + "localNodeName": "srl1", + "remoteName": "topo-1-srl3.clabernetes.svc.cluster.local", + "remoteNodeName": "srl3", + "localLinkName": "e1-2", + "remoteLinkName": "e1-1" + } + ], + "srl2": [ + { + "id": 1, + "localNodeName": "srl2", + "remoteName": "topo-1-srl1.clabernetes.svc.cluster.local", + "remoteNodeName": "srl1", + "localLinkName": "e1-1", + "remoteLinkName": "e1-1" + }, + { + "id": 3, + "localNodeName": "srl2", + "remoteName": "topo-1-srl3.clabernetes.svc.cluster.local", + "remoteNodeName": "srl3", + "localLinkName": "e1-2", + "remoteLinkName": "e1-2" + }, + { + "id": 4, + "localNodeName": "srl2", + "remoteName": "topo-1-srl4.clabernetes.svc.cluster.local", + "remoteNodeName": "srl4", + "localLinkName": "e1-3", + "remoteLinkName": "e1-1" + } + ], + "srl3": [ + { + "id": 2, + "localNodeName": "srl3", + "remoteName": "topo-1-srl1.clabernetes.svc.cluster.local", + "remoteNodeName": "srl1", + "localLinkName": "e1-1", + "remoteLinkName": "e1-2" + }, + { + "id": 3, + "localNodeName": "srl3", + "remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", + "remoteNodeName": "srl2", + "localLinkName": "e1-2", + "remoteLinkName": "e1-2" + }, + { + "id": 5, + "localNodeName": "srl3", + "remoteName": "topo-1-srl4.clabernetes.svc.cluster.local", + "remoteNodeName": "srl4", + "localLinkName": "e1-3", + "remoteLinkName": "e1-2" + } + ], + "srl4": [ + { + "id": 4, + "localNodeName": "srl4", + "remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", + "remoteNodeName": "srl2", + "localLinkName": "e1-1", + "remoteLinkName": "e1-3" + }, + { + "id": 5, + "localNodeName": "srl4", + "remoteName": "topo-1-srl3.clabernetes.svc.cluster.local", + "remoteNodeName": "srl3", + "localLinkName": "e1-2", + "remoteLinkName": "e1-3" + } + ] +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-already-allocated-ids.json b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-already-allocated-ids.json new file mode 100644 index 00000000..96581703 --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-already-allocated-ids.json @@ -0,0 +1,22 @@ +{ + "srl1": [ + { + "id": 1, + "localNodeName": "srl1", + "remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", + "remoteNodeName": "srl2", + "localLinkName": "e1-1", + "remoteLinkName": "e1-1" + } + ], + "srl2": [ + { + "id": 1, + "localNodeName": "srl2", + "remoteName": "topo-1-srl1.clabernetes.svc.cluster.local", + "remoteNodeName": "srl1", + "localLinkName": "e1-1", + "remoteLinkName": "e1-1" + } + ] +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-existing-status.json b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-existing-status.json new file mode 100644 index 00000000..96581703 --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-existing-status.json @@ -0,0 +1,22 @@ +{ + "srl1": [ + { + "id": 1, + "localNodeName": "srl1", + 
"remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", + "remoteNodeName": "srl2", + "localLinkName": "e1-1", + "remoteLinkName": "e1-1" + } + ], + "srl2": [ + { + "id": 1, + "localNodeName": "srl2", + "remoteName": "topo-1-srl1.clabernetes.svc.cluster.local", + "remoteNodeName": "srl1", + "localLinkName": "e1-1", + "remoteLinkName": "e1-1" + } + ] +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-weirdly-allocated-ids.json b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-weirdly-allocated-ids.json new file mode 100644 index 00000000..96581703 --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-weirdly-allocated-ids.json @@ -0,0 +1,22 @@ +{ + "srl1": [ + { + "id": 1, + "localNodeName": "srl1", + "remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", + "remoteNodeName": "srl2", + "localLinkName": "e1-1", + "remoteLinkName": "e1-1" + } + ], + "srl2": [ + { + "id": 1, + "localNodeName": "srl2", + "remoteName": "topo-1-srl1.clabernetes.svc.cluster.local", + "remoteNodeName": "srl1", + "localLinkName": "e1-1", + "remoteLinkName": "e1-1" + } + ] +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple.json b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple.json new file mode 100644 index 00000000..96581703 --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple.json @@ -0,0 +1,22 @@ +{ + "srl1": [ + { + "id": 1, + "localNodeName": "srl1", + "remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", + "remoteNodeName": "srl2", + "localLinkName": "e1-1", + "remoteLinkName": "e1-1" + } + ], + "srl2": [ + { + "id": 1, + "localNodeName": "srl2", + "remoteName": "topo-1-srl1.clabernetes.svc.cluster.local", + "remoteNodeName": "srl1", + "localLinkName": "e1-1", + "remoteLinkName": "e1-1" + } + ] +} \ No newline at end of file diff --git a/controllers/topology/serviceexpose.go b/controllers/topology/serviceexpose.go deleted file mode 100644 index 54bafbf1..00000000 --- a/controllers/topology/serviceexpose.go +++ /dev/null @@ -1,359 +0,0 @@ -package topology - -import ( - "context" - "fmt" - "sort" - "strings" - - clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - - clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" - clabernetesconstants "github.com/srl-labs/clabernetes/constants" - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" - claberneteserrors "github.com/srl-labs/clabernetes/errors" - clabernetesutil "github.com/srl-labs/clabernetes/util" - k8scorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -func (r *Reconciler) resolveExposeServices( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, -) (*clabernetescontrollers.ResolvedServices, error) { - ownedServices := &k8scorev1.ServiceList{} - - err := r.Client.List( - ctx, - ownedServices, - ctrlruntimeclient.InNamespace(obj.GetNamespace()), - ctrlruntimeclient.MatchingLabels{ - 
clabernetesconstants.LabelTopologyOwner: obj.GetName(), - }, - ) - if err != nil { - r.Log.Criticalf("failed fetching owned expose services, error: '%s'", err) - - return nil, err - } - - services := &clabernetescontrollers.ResolvedServices{ - Current: map[string]*k8scorev1.Service{}, - } - - for i := range ownedServices.Items { - labels := ownedServices.Items[i].Labels - - if labels == nil { - return nil, fmt.Errorf( - "%w: labels are nil, but we expect to see topology owner label here", - claberneteserrors.ErrInvalidData, - ) - } - - topologyServiceType := labels[clabernetesconstants.LabelTopologyServiceType] - - if topologyServiceType != clabernetesconstants.TopologyServiceTypeExpose { - // not the kind of service we're looking for here, we only care about the services - // used for exposing nodes here. - continue - } - - nodeName, ok := labels[clabernetesconstants.LabelTopologyNode] - if !ok || nodeName == "" { - return nil, fmt.Errorf( - "%w: topology node label is missing or empty", - claberneteserrors.ErrInvalidData, - ) - } - - services.Current[nodeName] = &ownedServices.Items[i] - } - - commonTopologySpec := obj.GetTopologyCommonSpec() - - exposedNodes := make([]string, 0) - - for nodeName, nodeData := range clabernetesConfigs { - // if disable auto expose is true *and* there are no ports defined for the node *and* - // there are no default ports defined for the topology we can skip the node from an expose - // perspective. - if commonTopologySpec.DisableAutoExpose && - len(nodeData.Topology.Nodes[nodeName].Ports) == 0 && - len(nodeData.Topology.Defaults.Ports) == 0 { - continue - } - - exposedNodes = append(exposedNodes, nodeName) - } - - services.Missing = clabernetesutil.StringSliceDifference( - services.CurrentServiceNames(), - exposedNodes, - ) - - r.Log.Debugf( - "expose services are missing for the following nodes: %s", - services.Missing, - ) - - extraEndpointDeployments := clabernetesutil.StringSliceDifference( - exposedNodes, - services.CurrentServiceNames(), - ) - - r.Log.Debugf( - "extraneous expose services exist for following nodes: %s", - extraEndpointDeployments, - ) - - services.Extra = make([]*k8scorev1.Service, len(extraEndpointDeployments)) - - for idx, endpoint := range extraEndpointDeployments { - services.Extra[idx] = services.Current[endpoint] - } - - return services, nil -} - -func (r *Reconciler) pruneExposeServices( - ctx context.Context, - services *clabernetescontrollers.ResolvedServices, -) error { - r.Log.Info("pruning extraneous expose services") - - for _, extraDeployment := range services.Extra { - r.Log.Debugf( - "removing expose service '%s/%s'", - extraDeployment.Namespace, - extraDeployment.Name, - ) - - err := r.Client.Delete(ctx, extraDeployment) - if err != nil { - r.Log.Criticalf( - "failed removing expose service '%s/%s' error: %s", - extraDeployment.Namespace, - extraDeployment.Name, - err, - ) - - return err - } - } - - return nil -} - -func (r *Reconciler) enforceExposeServices( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - objTopologyStatus *clabernetesapistopologyv1alpha1.TopologyStatus, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - services *clabernetescontrollers.ResolvedServices, -) error { - r.Log.Info("creating missing expose services") - - for _, nodeName := range services.Missing { - service := r.renderExposeService( - obj, - objTopologyStatus, - clabernetesConfigs, - nodeName, - ) - - err := ctrlruntimeutil.SetOwnerReference(obj, service, r.Client.Scheme()) - if 
err != nil { - return err - } - - r.Log.Debugf( - "creating expose service '%s/%s'", - service.Namespace, - service.Name, - ) - - err = r.Client.Create(ctx, service) - if err != nil { - r.Log.Criticalf( - "failed creating expose service '%s/%s' error: %s", - service.Namespace, - service.Name, - err, - ) - - return err - } - } - - // compare and update existing deployments if we need to - r.Log.Info("enforcing desired state on existing expose services") - - for nodeName, service := range services.Current { - r.Log.Debugf( - "comparing existing expose service '%s/%s' to desired state", - service.Namespace, - service.Name, - ) - - expectedService := r.renderExposeService( - obj, - objTopologyStatus, - clabernetesConfigs, - nodeName, - ) - - if len(service.Status.LoadBalancer.Ingress) == 1 { - // can/would this ever be more than 1? i dunno? - address := service.Status.LoadBalancer.Ingress[0].IP - if address != "" { - objTopologyStatus.NodeExposedPorts[nodeName].LoadBalancerAddress = address - } - } - - err := ctrlruntimeutil.SetOwnerReference(obj, expectedService, r.Client.Scheme()) - if err != nil { - return err - } - - if !ServiceConforms(service, expectedService, obj.GetUID()) { - r.Log.Debugf( - "comparing existing expose service '%s/%s' spec does not conform to desired "+ - "state, updating", - service.Namespace, - service.Name, - ) - - err = r.Client.Update(ctx, expectedService) - if err != nil { - r.Log.Criticalf( - "failed updating expose service '%s/%s' error: %s", - expectedService.Namespace, - expectedService.Name, - err, - ) - - return err - } - } - } - - return nil -} - -func (r *Reconciler) parseContainerlabTopologyPortsSection( - portDefinition string, -) (bool, *k8scorev1.ServicePort) { - typedPort, err := clabernetesutilcontainerlab.ProcessPortDefinition(portDefinition) - if err != nil { - r.Log.Warnf("skipping port due to the following error: %s", err) - - return true, nil - } - - return false, &k8scorev1.ServicePort{ - Name: fmt.Sprintf( - "port-%d-%s", typedPort.DestinationPort, strings.ToLower(typedPort.Protocol), - ), - Protocol: k8scorev1.Protocol(typedPort.Protocol), - Port: int32(typedPort.DestinationPort), - TargetPort: intstr.IntOrString{ - IntVal: int32(typedPort.ExposePort), - }, - } -} - -func (r *Reconciler) renderExposeService( - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - objTopologyStatus *clabernetesapistopologyv1alpha1.TopologyStatus, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - nodeName string, -) *k8scorev1.Service { - configManager := r.ConfigManagerGetter() - globalAnnotations, globalLabels := configManager.GetAllMetadata() - - name := obj.GetName() - - objTopologyStatus.NodeExposedPorts[nodeName] = &clabernetesapistopologyv1alpha1.ExposedPorts{ - TCPPorts: make([]int, 0), - UDPPorts: make([]int, 0), - } - - serviceName := fmt.Sprintf("%s-%s", name, nodeName) - - labels := map[string]string{ - clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, - clabernetesconstants.LabelName: serviceName, - clabernetesconstants.LabelTopologyOwner: name, - clabernetesconstants.LabelTopologyNode: nodeName, - clabernetesconstants.LabelTopologyKind: r.ResourceKind, - clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeExpose, //nolint:lll - } - - for k, v := range globalLabels { - labels[k] = v - } - - ports := make([]k8scorev1.ServicePort, 0) - - // for actual containerlab configs we copy the users given defaults into each "sub topology" -- - // so in the case of containerlab we want 
to make sure we also iterate over the "default" or - // topology wide ports that were specified. in this process we dont want to duplicate things, so - // we use a simple set implementation to make sure we aren't doubling up on any port - // definitions. - allContainerlabPorts := clabernetesutil.NewStringSet() - - allContainerlabPorts.Extend(clabernetesConfigs[nodeName].Topology.Nodes[nodeName].Ports) - - allContainerlabPorts.Extend(clabernetesConfigs[nodeName].Topology.Defaults.Ports) - - allContainerlabPortsItems := allContainerlabPorts.Items() - sort.Strings(allContainerlabPortsItems) - - for _, portDefinition := range allContainerlabPortsItems { - shouldSkip, port := r.parseContainerlabTopologyPortsSection(portDefinition) - - if shouldSkip { - continue - } - - ports = append(ports, *port) - - // dont forget to update the exposed ports status bits - if port.Protocol == clabernetesconstants.TCP { - objTopologyStatus.NodeExposedPorts[nodeName].TCPPorts = append( - objTopologyStatus.NodeExposedPorts[nodeName].TCPPorts, - int(port.Port), - ) - } else { - objTopologyStatus.NodeExposedPorts[nodeName].UDPPorts = append( - objTopologyStatus.NodeExposedPorts[nodeName].UDPPorts, - int(port.Port), - ) - } - } - - service := &k8scorev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: obj.GetNamespace(), - Annotations: globalAnnotations, - Labels: labels, - }, - Spec: k8scorev1.ServiceSpec{ - Ports: ports, - Selector: map[string]string{ - clabernetesconstants.LabelTopologyOwner: name, - clabernetesconstants.LabelTopologyNode: nodeName, - }, - Type: "LoadBalancer", - }, - } - - return service -} diff --git a/controllers/topology/servicefabric.go b/controllers/topology/servicefabric.go deleted file mode 100644 index 3fd77d68..00000000 --- a/controllers/topology/servicefabric.go +++ /dev/null @@ -1,267 +0,0 @@ -package topology - -import ( - "context" - "fmt" - - clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - - clabernetesconstants "github.com/srl-labs/clabernetes/constants" - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" - claberneteserrors "github.com/srl-labs/clabernetes/errors" - clabernetesutil "github.com/srl-labs/clabernetes/util" - k8scorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -func (r *Reconciler) resolveFabricServices( - ctx context.Context, - obj ctrlruntimeclient.Object, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, -) (*clabernetescontrollers.ResolvedServices, error) { - ownedServices := &k8scorev1.ServiceList{} - - err := r.Client.List( - ctx, - ownedServices, - ctrlruntimeclient.InNamespace(obj.GetNamespace()), - ctrlruntimeclient.MatchingLabels{ - clabernetesconstants.LabelTopologyOwner: obj.GetName(), - }, - ) - if err != nil { - r.Log.Criticalf("failed fetching owned services, error: '%s'", err) - - return nil, err - } - - services := &clabernetescontrollers.ResolvedServices{ - Current: map[string]*k8scorev1.Service{}, - } - - for i := range ownedServices.Items { - labels := ownedServices.Items[i].Labels - - if labels == nil { - return nil, fmt.Errorf( - "%w: labels are nil, but we expect to see topology owner label here", - claberneteserrors.ErrInvalidData, - ) - } - - topologyServiceType := labels[clabernetesconstants.LabelTopologyServiceType] - - if 
topologyServiceType != clabernetesconstants.TopologyServiceTypeFabric { - // not the kind of service we're looking for here, we only care about the services - // used for connecting the nodes together here. - continue - } - - nodeName, ok := labels[clabernetesconstants.LabelTopologyNode] - if !ok || nodeName == "" { - return nil, fmt.Errorf( - "%w: topology node label is missing or empty", - claberneteserrors.ErrInvalidData, - ) - } - - services.Current[nodeName] = &ownedServices.Items[i] - } - - allNodes := make([]string, len(clabernetesConfigs)) - - var nodeIdx int - - for nodeName := range clabernetesConfigs { - allNodes[nodeIdx] = nodeName - - nodeIdx++ - } - - services.Missing = clabernetesutil.StringSliceDifference( - services.CurrentServiceNames(), - allNodes, - ) - - r.Log.Debugf( - "services are missing for the following nodes: %s", - services.Missing, - ) - - extraEndpointDeployments := clabernetesutil.StringSliceDifference( - allNodes, - services.CurrentServiceNames(), - ) - - r.Log.Debugf( - "extraneous services exist for following nodes: %s", - extraEndpointDeployments, - ) - - services.Extra = make([]*k8scorev1.Service, len(extraEndpointDeployments)) - - for idx, endpoint := range extraEndpointDeployments { - services.Extra[idx] = services.Current[endpoint] - } - - return services, nil -} - -func (r *Reconciler) pruneFabricServices( - ctx context.Context, - services *clabernetescontrollers.ResolvedServices, -) error { - r.Log.Info("pruning extraneous services") - - for _, extraDeployment := range services.Extra { - r.Log.Debugf( - "removing service '%s/%s'", - extraDeployment.Namespace, - extraDeployment.Name, - ) - - err := r.Client.Delete(ctx, extraDeployment) - if err != nil { - r.Log.Criticalf( - "failed removing service '%s/%s' error: %s", - extraDeployment.Namespace, - extraDeployment.Name, - err, - ) - - return err - } - } - - return nil -} - -func (r *Reconciler) enforceFabricServices( - ctx context.Context, - obj ctrlruntimeclient.Object, - services *clabernetescontrollers.ResolvedServices, -) error { - r.Log.Info("creating missing services") - - for _, nodeName := range services.Missing { - service := r.renderFabricService( - obj, - nodeName, - ) - - err := ctrlruntimeutil.SetOwnerReference(obj, service, r.Client.Scheme()) - if err != nil { - return err - } - - r.Log.Debugf( - "creating service '%s/%s'", - service.Namespace, - service.Name, - ) - - err = r.Client.Create(ctx, service) - if err != nil { - r.Log.Criticalf( - "failed creating service '%s/%s' error: %s", - service.Namespace, - service.Name, - err, - ) - - return err - } - } - - // compare and update existing deployments if we need to - r.Log.Info("enforcing desired state on existing services") - - for nodeName, service := range services.Current { - r.Log.Debugf( - "comparing existing service '%s/%s' to desired state", - service.Namespace, - service.Name, - ) - - expectedService := r.renderFabricService( - obj, - nodeName, - ) - - err := ctrlruntimeutil.SetOwnerReference(obj, service, r.Client.Scheme()) - if err != nil { - return err - } - - if !ServiceConforms(service, expectedService, obj.GetUID()) { - r.Log.Debugf( - "comparing existing service '%s/%s' spec does not conform to desired state, "+ - "updating", - service.Namespace, - service.Name, - ) - - err = r.Client.Update(ctx, expectedService) - if err != nil { - r.Log.Criticalf( - "failed updating service '%s/%s' error: %s", - expectedService.Namespace, - expectedService.Name, - err, - ) - - return err - } - } - } - - return nil -} - -func (r 
*Reconciler) renderFabricService( - obj ctrlruntimeclient.Object, - nodeName string, -) *k8scorev1.Service { - name := obj.GetName() - - serviceName := fmt.Sprintf("%s-%s-vx", name, nodeName) - - labels := map[string]string{ - clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, - clabernetesconstants.LabelName: serviceName, - clabernetesconstants.LabelTopologyOwner: name, - clabernetesconstants.LabelTopologyNode: nodeName, - clabernetesconstants.LabelTopologyKind: r.ResourceKind, - clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeFabric, //nolint:lll - } - - service := &k8scorev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: obj.GetNamespace(), - Labels: labels, - }, - Spec: k8scorev1.ServiceSpec{ - Ports: []k8scorev1.ServicePort{ - { - Name: "vxlan", - Protocol: "UDP", - Port: clabernetesconstants.VXLANServicePort, - TargetPort: intstr.IntOrString{ - IntVal: clabernetesconstants.VXLANServicePort, - }, - }, - }, - Selector: map[string]string{ - clabernetesconstants.LabelTopologyOwner: name, - clabernetesconstants.LabelTopologyNode: nodeName, - }, - Type: "ClusterIP", - }, - } - - return service -} diff --git a/e2e/topology/containerlab/basic/test-fixtures/golden/10-service.containerlab-basic-srl1.yaml b/e2e/topology/containerlab/basic/test-fixtures/golden/10-service.containerlab-basic-srl1.yaml index 0d40032f..e91fedc5 100644 --- a/e2e/topology/containerlab/basic/test-fixtures/golden/10-service.containerlab-basic-srl1.yaml +++ b/e2e/topology/containerlab/basic/test-fixtures/golden/10-service.containerlab-basic-srl1.yaml @@ -79,6 +79,8 @@ spec: protocol: TCP targetPort: 60012 selector: + clabernetes/app: clabernetes + clabernetes/name: containerlab-basic-srl1 clabernetes/topologyNode: srl1 clabernetes/topologyOwner: containerlab-basic sessionAffinity: None diff --git a/logging/fake.go b/logging/fake.go new file mode 100644 index 00000000..cb732da5 --- /dev/null +++ b/logging/fake.go @@ -0,0 +1,38 @@ +package logging + +var _ Instance = (*FakeInstance)(nil) + +// FakeInstance is a fake logging instance that does nothing. 
+type FakeInstance struct{} + +func (i *FakeInstance) Debug(f string) {} + +func (i *FakeInstance) Debugf(f string, a ...interface{}) {} + +func (i *FakeInstance) Info(f string) {} + +func (i *FakeInstance) Infof(f string, a ...interface{}) {} + +func (i *FakeInstance) Warn(f string) {} + +func (i *FakeInstance) Warnf(f string, a ...interface{}) {} + +func (i *FakeInstance) Critical(f string) {} + +func (i *FakeInstance) Criticalf(f string, a ...interface{}) {} + +func (i *FakeInstance) Fatal(f string) {} + +func (i *FakeInstance) Fatalf(f string, a ...interface{}) {} + +func (i *FakeInstance) Write(p []byte) (n int, err error) { + return 0, nil +} + +func (i *FakeInstance) GetName() string { + return "" +} + +func (i *FakeInstance) GetLevel() string { + return "" +} diff --git a/manager/clabernetes.go b/manager/clabernetes.go index 61bbdb54..2dcb2701 100644 --- a/manager/clabernetes.go +++ b/manager/clabernetes.go @@ -6,6 +6,8 @@ import ( "os" "time" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" clabernetesconstants "github.com/srl-labs/clabernetes/constants" @@ -160,7 +162,7 @@ func (c *clabernetes) startup() { var err error - c.namespace, err = clabernetesutil.CurrentNamespace() + c.namespace, err = clabernetesutilkubernetes.CurrentNamespace() if err != nil { c.logger.Criticalf("failed getting current namespace, err: %s", err) diff --git a/manager/election/util.go b/manager/election/util.go index 538bd164..e9ab6d31 100644 --- a/manager/election/util.go +++ b/manager/election/util.go @@ -3,6 +3,8 @@ package election import ( "os" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + clabernetesutil "github.com/srl-labs/clabernetes/util" ) @@ -16,14 +18,14 @@ const ( func GenerateLeaderIdentity() string { hostname, err := os.Hostname() if err == nil { - return clabernetesutil.SafeConcatNameKubernetes( + return clabernetesutilkubernetes.SafeConcatNameKubernetes( "clabernetes", hostname, clabernetesutil.RandomString(unknownHostnameRandomNameLen), ) } - return clabernetesutil.SafeConcatNameKubernetes( + return clabernetesutilkubernetes.SafeConcatNameKubernetes( "clabernetes", clabernetesutil.RandomString(unknownHostnameRandomNameLen), ) diff --git a/testhelper/kubernetes.go b/testhelper/kubernetes.go index 68a9c243..4ca307b5 100644 --- a/testhelper/kubernetes.go +++ b/testhelper/kubernetes.go @@ -3,6 +3,8 @@ package testhelper import ( "testing" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + clabernetesutil "github.com/srl-labs/clabernetes/util" ) @@ -48,7 +50,7 @@ func NormalizeKubernetesObject(t *testing.T, object []byte) []byte { // NewTestNamespace generates a namespace for a test. func NewTestNamespace(testName string) string { - return clabernetesutil.SafeConcatNameKubernetes( + return clabernetesutilkubernetes.SafeConcatNameKubernetes( "e2e", testName, clabernetesutil.RandomString(namespaceRandomPad), diff --git a/util/kubernetes.go b/util/kubernetes.go deleted file mode 100644 index ffdd5781..00000000 --- a/util/kubernetes.go +++ /dev/null @@ -1,115 +0,0 @@ -package util - -import ( - "crypto/sha256" - "encoding/hex" - "os" - "strings" - - "gopkg.in/yaml.v3" - k8scorev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" -) - -const ( - // NameMaxLen is the maximum length for a kubernetes name. 
- NameMaxLen = 63 -) - -// CurrentNamespace returns the current kubernetes namespace as read from the KUBE_NAMESPACE env -// var, or the serviceaccount/namespace file on the instance. -func CurrentNamespace() (string, error) { - namespaceFromEnv := os.Getenv("KUBE_NAMESPACE") - if namespaceFromEnv != "" { - return namespaceFromEnv, nil - } - - namespaceFromFile, err := os.ReadFile( - "/var/run/secrets/kubernetes.io/serviceaccount/namespace", - ) - if err != nil { - return "", err - } - - return string(namespaceFromFile), nil -} - -// MustCurrentNamespace returns the current kubernetes namespace or panics. -func MustCurrentNamespace() string { - namespace, err := CurrentNamespace() - if err != nil { - Panic(err.Error()) - } - - return namespace -} - -// SafeConcatNameKubernetes concats all provided strings into a string joined by "-" - if the final -// string is greater than 63 characters, the string will be shortened, and a hash will be used at -// the end of the string to keep it unique, but safely within allowed lengths. -func SafeConcatNameKubernetes(name ...string) string { - return SafeConcatNameMax(name, NameMaxLen) -} - -// SafeConcatNameMax concats all provided strings into a string joined by "-" - if the final string -// is greater than max characters, the string will be shortened, and a hash will be used at the end -// of the string to keep it unique, but safely within allowed lengths. -func SafeConcatNameMax(name []string, max int) string { - finalName := strings.Join(name, "-") - - if len(finalName) <= max { - return finalName - } - - digest := sha256.Sum256([]byte(finalName)) - - return finalName[0:max-8] + "-" + hex.EncodeToString(digest[0:])[0:7] -} - -type resourceQuantities struct { - CPU string `yaml:"cpu"` - Memory string `yaml:"memory"` -} - -type resourceRequirements struct { - Requests resourceQuantities `yaml:"requests"` - Limits resourceQuantities `yaml:"limits"` -} - -func (r *resourceRequirements) toK8sResourceRequirements() *k8scorev1.ResourceRequirements { - out := &k8scorev1.ResourceRequirements{ - Limits: map[k8scorev1.ResourceName]resource.Quantity{}, - Requests: map[k8scorev1.ResourceName]resource.Quantity{}, - } - - if r.Requests.Memory != "" { - out.Requests["memory"] = resource.MustParse(r.Requests.Memory) - } - - if r.Requests.CPU != "" { - out.Requests["cpu"] = resource.MustParse(r.Requests.CPU) - } - - if r.Limits.Memory != "" { - out.Limits["memory"] = resource.MustParse(r.Limits.Memory) - } - - if r.Limits.CPU != "" { - out.Limits["cpu"] = resource.MustParse(r.Limits.CPU) - } - - return out -} - -// YAMLToK8sResourceRequirements accepts a yaml string that looks suspiciously like k8s resources -// for a container and converts it to k8scorev1.ResourceRequirements. -func YAMLToK8sResourceRequirements(asYAML string) (*k8scorev1.ResourceRequirements, error) { - out := &resourceRequirements{} - - err := yaml.Unmarshal([]byte(asYAML), out) - if err != nil { - return nil, err - } - - return out.toK8sResourceRequirements(), nil -} diff --git a/util/kubernetes/containers.go b/util/kubernetes/containers.go new file mode 100644 index 00000000..13643577 --- /dev/null +++ b/util/kubernetes/containers.go @@ -0,0 +1,33 @@ +package kubernetes + +import ( + "reflect" + + k8scorev1 "k8s.io/api/core/v1" +) + +// ContainersEqual returns true if the existing container slice matches the rendered container slice +// it ignores slice order. 
+func ContainersEqual(existing, rendered []k8scorev1.Container) bool { + if len(existing) != len(rendered) { + return false + } + + for existingIdx := range existing { + var matched bool + + for renderedIdx := range rendered { + if reflect.DeepEqual(existing[existingIdx], rendered[renderedIdx]) { + matched = true + + break + } + } + + if !matched { + return false + } + } + + return true +} diff --git a/controllers/meta.go b/util/kubernetes/meta.go similarity index 97% rename from controllers/meta.go rename to util/kubernetes/meta.go index 6f8a6a9d..1eba55b7 100644 --- a/controllers/meta.go +++ b/util/kubernetes/meta.go @@ -1,4 +1,4 @@ -package controllers +package kubernetes // AnnotationsOrLabelsConform returns false if the existing labels/annotations (or really just map) // does *not* have all the keys/values from the expected/rendered labels/annotations. diff --git a/util/kubernetes/names.go b/util/kubernetes/names.go new file mode 100644 index 00000000..10287fad --- /dev/null +++ b/util/kubernetes/names.go @@ -0,0 +1,34 @@ +package kubernetes + +import ( + "crypto/sha256" + "encoding/hex" + "strings" +) + +const ( + // NameMaxLen is the maximum length for a kubernetes name. + NameMaxLen = 63 +) + +// SafeConcatNameKubernetes concats all provided strings into a string joined by "-" - if the final +// string is greater than 63 characters, the string will be shortened, and a hash will be used at +// the end of the string to keep it unique, but safely within allowed lengths. +func SafeConcatNameKubernetes(name ...string) string { + return SafeConcatNameMax(name, NameMaxLen) +} + +// SafeConcatNameMax concats all provided strings into a string joined by "-" - if the final string +// is greater than max characters, the string will be shortened, and a hash will be used at the end +// of the string to keep it unique, but safely within allowed lengths. +func SafeConcatNameMax(name []string, max int) string { + finalName := strings.Join(name, "-") + + if len(finalName) <= max { + return finalName + } + + digest := sha256.Sum256([]byte(finalName)) + + return finalName[0:max-8] + "-" + hex.EncodeToString(digest[0:])[0:7] +} diff --git a/util/kubernetes_test.go b/util/kubernetes/names_test.go similarity index 50% rename from util/kubernetes_test.go rename to util/kubernetes/names_test.go index 7128d8d8..da79c67e 100644 --- a/util/kubernetes_test.go +++ b/util/kubernetes/names_test.go @@ -1,15 +1,10 @@ -package util_test +package kubernetes_test import ( - "reflect" "testing" - "k8s.io/apimachinery/pkg/api/resource" - - k8scorev1 "k8s.io/api/core/v1" - clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" - clabernetesutil "github.com/srl-labs/clabernetes/util" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" ) func TestSafeConcatNameKubernetes(t *testing.T) { @@ -43,7 +38,7 @@ func TestSafeConcatNameKubernetes(t *testing.T) { for _, tc := range cases { t.Logf("%s: starting", tc.name) - actual := clabernetesutil.SafeConcatNameKubernetes(tc.in...) + actual := clabernetesutilkubernetes.SafeConcatNameKubernetes(tc.in...) 
if actual != tc.expected { clabernetestesthelper.FailOutput(t, actual, tc.expected) } @@ -85,73 +80,9 @@ func TestSafeConcatNameMax(t *testing.T) { for _, tc := range cases { t.Logf("%s: starting", tc.name) - actual := clabernetesutil.SafeConcatNameMax(tc.in, tc.max) + actual := clabernetesutilkubernetes.SafeConcatNameMax(tc.in, tc.max) if actual != tc.expected { clabernetestesthelper.FailOutput(t, actual, tc.expected) } } } - -func TestYAMLToK8sResourceRequirements(t *testing.T) { - cases := []struct { - name string - in string - expected *k8scorev1.ResourceRequirements - }{ - { - name: "simple", - in: `--- -requests: - memory: 128Mi - cpu: 50m -`, - expected: &k8scorev1.ResourceRequirements{ - Limits: k8scorev1.ResourceList{}, - Requests: k8scorev1.ResourceList{ - "memory": resource.MustParse("128Mi"), - "cpu": resource.MustParse("50m"), - }, - }, - }, - { - name: "simple", - in: `--- -requests: - memory: 128Mi - cpu: 50m -limits: - memory: 256Mi - cpu: 100m -`, - expected: &k8scorev1.ResourceRequirements{ - Limits: k8scorev1.ResourceList{ - "memory": resource.MustParse("256Mi"), - "cpu": resource.MustParse("100m"), - }, - Requests: k8scorev1.ResourceList{ - "memory": resource.MustParse("128Mi"), - "cpu": resource.MustParse("50m"), - }, - }, - }, - } - - for _, testCase := range cases { - t.Run( - testCase.name, - func(t *testing.T) { - t.Logf("%s: starting", testCase.name) - - actual, err := clabernetesutil.YAMLToK8sResourceRequirements(testCase.in) - if err != nil { - t.Fatalf( - "failed calling YAMLToK8sResourceRequirements, error: %s", err, - ) - } - - if !reflect.DeepEqual(actual, testCase.expected) { - clabernetestesthelper.FailOutput(t, actual, testCase.expected) - } - }) - } -} diff --git a/util/kubernetes/namespace.go b/util/kubernetes/namespace.go new file mode 100644 index 00000000..bcdc9aa8 --- /dev/null +++ b/util/kubernetes/namespace.go @@ -0,0 +1,35 @@ +package kubernetes + +import ( + "os" + + clabernetesutil "github.com/srl-labs/clabernetes/util" +) + +// CurrentNamespace returns the current kubernetes namespace as read from the KUBE_NAMESPACE env +// var, or the serviceaccount/namespace file on the instance. +func CurrentNamespace() (string, error) { + namespaceFromEnv := os.Getenv("KUBE_NAMESPACE") + if namespaceFromEnv != "" { + return namespaceFromEnv, nil + } + + namespaceFromFile, err := os.ReadFile( + "/var/run/secrets/kubernetes.io/serviceaccount/namespace", + ) + if err != nil { + return "", err + } + + return string(namespaceFromFile), nil +} + +// MustCurrentNamespace returns the current kubernetes namespace or panics. +func MustCurrentNamespace() string { + namespace, err := CurrentNamespace() + if err != nil { + clabernetesutil.Panic(err.Error()) + } + + return namespace +} diff --git a/util/kubernetes/objectdiffer.go b/util/kubernetes/objectdiffer.go new file mode 100644 index 00000000..db873a60 --- /dev/null +++ b/util/kubernetes/objectdiffer.go @@ -0,0 +1,55 @@ +package kubernetes + +import ( + clabernetesutil "github.com/srl-labs/clabernetes/util" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ObjectDiffer holds objets of type T -- used for comparing current, missing, and extraneous +// objects in the cluster. +type ObjectDiffer[T ctrlruntimeclient.Object] struct { + // Current objects by endpoint name + Current map[string]T + // Missing objects by endpoint name + Missing []string + // Extra objects that should be pruned + Extra []T +} + +// CurrentObjectNames returns a slice of the names of the current objects. 
+func (d *ObjectDiffer[T]) CurrentObjectNames() []string { + names := make([]string, len(d.Current)) + + var idx int + + for k := range d.Current { + names[idx] = k + + idx++ + } + + return names +} + +// SetMissing sets the missing objects based on the slice of all expected object names. +func (d *ObjectDiffer[T]) SetMissing(allExpectedNames []string) { + d.Missing = clabernetesutil.StringSliceDifference( + d.CurrentObjectNames(), + allExpectedNames, + ) +} + +// SetExtra sets the extra objects based on the slice of all expected object names and the current +// objects -- `Current` must be set prior to calling this or things will be weird. +func (d *ObjectDiffer[T]) SetExtra(allExpectedNames []string) { + extraNames := clabernetesutil.StringSliceDifference( + allExpectedNames, + d.CurrentObjectNames(), + ) + + d.Extra = make([]T, len(extraNames)) + + for idx, extraName := range extraNames { + d.Extra[idx] = d.Current[extraName] + } +} diff --git a/util/kubernetes/resources.go b/util/kubernetes/resources.go new file mode 100644 index 00000000..8e09fbb6 --- /dev/null +++ b/util/kubernetes/resources.go @@ -0,0 +1,55 @@ +package kubernetes + +import ( + "gopkg.in/yaml.v3" + k8scorev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +type resourceQuantities struct { + CPU string `yaml:"cpu"` + Memory string `yaml:"memory"` +} + +type resourceRequirements struct { + Requests resourceQuantities `yaml:"requests"` + Limits resourceQuantities `yaml:"limits"` +} + +func (r *resourceRequirements) toK8sResourceRequirements() *k8scorev1.ResourceRequirements { + out := &k8scorev1.ResourceRequirements{ + Limits: map[k8scorev1.ResourceName]resource.Quantity{}, + Requests: map[k8scorev1.ResourceName]resource.Quantity{}, + } + + if r.Requests.Memory != "" { + out.Requests["memory"] = resource.MustParse(r.Requests.Memory) + } + + if r.Requests.CPU != "" { + out.Requests["cpu"] = resource.MustParse(r.Requests.CPU) + } + + if r.Limits.Memory != "" { + out.Limits["memory"] = resource.MustParse(r.Limits.Memory) + } + + if r.Limits.CPU != "" { + out.Limits["cpu"] = resource.MustParse(r.Limits.CPU) + } + + return out +} + +// YAMLToK8sResourceRequirements accepts a yaml string that looks suspiciously like k8s resources +// for a container and converts it to k8scorev1.ResourceRequirements. 
+func YAMLToK8sResourceRequirements(asYAML string) (*k8scorev1.ResourceRequirements, error) { + out := &resourceRequirements{} + + err := yaml.Unmarshal([]byte(asYAML), out) + if err != nil { + return nil, err + } + + return out.toK8sResourceRequirements(), nil +} diff --git a/util/kubernetes/resources_test.go b/util/kubernetes/resources_test.go new file mode 100644 index 00000000..933350dc --- /dev/null +++ b/util/kubernetes/resources_test.go @@ -0,0 +1,75 @@ +package kubernetes_test + +import ( + "reflect" + "testing" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + k8scorev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestYAMLToK8sResourceRequirements(t *testing.T) { + cases := []struct { + name string + in string + expected *k8scorev1.ResourceRequirements + }{ + { + name: "simple", + in: `--- +requests: + memory: 128Mi + cpu: 50m +`, + expected: &k8scorev1.ResourceRequirements{ + Limits: k8scorev1.ResourceList{}, + Requests: k8scorev1.ResourceList{ + "memory": resource.MustParse("128Mi"), + "cpu": resource.MustParse("50m"), + }, + }, + }, + { + name: "simple", + in: `--- +requests: + memory: 128Mi + cpu: 50m +limits: + memory: 256Mi + cpu: 100m +`, + expected: &k8scorev1.ResourceRequirements{ + Limits: k8scorev1.ResourceList{ + "memory": resource.MustParse("256Mi"), + "cpu": resource.MustParse("100m"), + }, + Requests: k8scorev1.ResourceList{ + "memory": resource.MustParse("128Mi"), + "cpu": resource.MustParse("50m"), + }, + }, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + actual, err := clabernetesutilkubernetes.YAMLToK8sResourceRequirements(testCase.in) + if err != nil { + t.Fatalf( + "failed calling YAMLToK8sResourceRequirements, error: %s", err, + ) + } + + if !reflect.DeepEqual(actual, testCase.expected) { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +} diff --git a/util/kubernetes/volumes.go b/util/kubernetes/volumes.go new file mode 100644 index 00000000..f3815b7c --- /dev/null +++ b/util/kubernetes/volumes.go @@ -0,0 +1,14 @@ +package kubernetes + +import k8scorev1 "k8s.io/api/core/v1" + +// VolumeAlreadyMounted checks if the given volumeName is already in the existingVolumes. 
+func VolumeAlreadyMounted(volumeName string, existingVolumes []k8scorev1.Volume) bool { + for idx := range existingVolumes { + if volumeName == existingVolumes[idx].Name { + return true + } + } + + return false +} From 761a7e34fc8b037363f664480b6a56748076147f Mon Sep 17 00:00:00 2001 From: Carl Montanari Date: Sun, 22 Oct 2023 15:22:13 -0700 Subject: [PATCH 7/8] test: service tests --- controllers/topology/reconciler/configmap.go | 43 +- .../topology/reconciler/configmap_test.go | 266 ++++++++++++ controllers/topology/reconciler/deployment.go | 8 +- .../topology/reconciler/deployment_test.go | 408 +++++++++++++----- controllers/topology/reconciler/reconciler.go | 4 +- controllers/topology/reconciler/resolve.go | 8 +- .../topology/reconciler/serviceexpose.go | 5 +- .../topology/reconciler/serviceexpose_test.go | 184 ++++++++ .../topology/reconciler/servicefabric.go | 7 +- .../topology/reconciler/servicefabric_test.go | 107 +++++ .../render-deployment/containerlab-debug.json | 102 +++++ .../insecure-registries.json | 102 +++++ .../render-deployment/launcher-log-level.json | 98 +++++ .../render-service/simple-status.json | 30 ++ .../serviceexpose/render-service/simple.json | 113 +++++ .../servicefabric/render-service/simple.json | 35 ++ .../allocate-tunnel-ids/meshy-links.json | 90 ---- .../simple-already-allocated-ids.json | 22 - .../simple-existing-status.json | 22 - .../simple-weirdly-allocated-ids.json | 22 - .../tunnels/allocate-tunnel-ids/simple.json | 22 - util/kubernetes/containers_test.go | 62 +++ util/kubernetes/meta_test.go | 82 ++++ util/kubernetes/names_test.go | 32 +- util/kubernetes/volumes_test.go | 1 + util/{kubernetes => }/objectdiffer.go | 13 +- util/objectdiffer_test.go | 122 ++++++ 27 files changed, 1656 insertions(+), 354 deletions(-) create mode 100644 controllers/topology/reconciler/serviceexpose_test.go create mode 100644 controllers/topology/reconciler/servicefabric_test.go create mode 100755 controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/containerlab-debug.json create mode 100755 controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/insecure-registries.json create mode 100755 controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/launcher-log-level.json create mode 100755 controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple-status.json create mode 100755 controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple.json create mode 100755 controllers/topology/reconciler/test-fixtures/golden/servicefabric/render-service/simple.json delete mode 100644 controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/meshy-links.json delete mode 100644 controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-already-allocated-ids.json delete mode 100644 controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-existing-status.json delete mode 100644 controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-weirdly-allocated-ids.json delete mode 100644 controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple.json create mode 100644 util/kubernetes/containers_test.go create mode 100644 util/kubernetes/meta_test.go create mode 100644 util/kubernetes/volumes_test.go rename util/{kubernetes => }/objectdiffer.go (79%) create mode 100644 util/objectdiffer_test.go diff --git 
a/controllers/topology/reconciler/configmap.go b/controllers/topology/reconciler/configmap.go index 6bd6234c..07a90cba 100644 --- a/controllers/topology/reconciler/configmap.go +++ b/controllers/topology/reconciler/configmap.go @@ -45,44 +45,37 @@ type ConfigMapReconciler struct { // the configmap that will ultimately be referenced when mounting sub-topologies and tunnel data in // the clabernetes launcher pod(s) for a given topology. func (r *ConfigMapReconciler) Render( - namespacedName apimachinerytypes.NamespacedName, + owningTopologyNamespacedName apimachinerytypes.NamespacedName, clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel, ) (*k8scorev1.ConfigMap, error) { - configManager := r.configManagerGetter() - globalAnnotations, globalLabels := configManager.GetAllMetadata() + annotations, globalLabels := r.configManagerGetter().GetAllMetadata() - configMap := &k8scorev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespacedName.Name, - Namespace: namespacedName.Namespace, - Annotations: globalAnnotations, - Labels: map[string]string{ - clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, - clabernetesconstants.LabelName: namespacedName.Name, - clabernetesconstants.LabelTopologyOwner: namespacedName.Name, - clabernetesconstants.LabelTopologyKind: r.owningTopologyKind, - }, - }, - Data: map[string]string{}, + labels := map[string]string{ + clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, + clabernetesconstants.LabelName: owningTopologyNamespacedName.Name, + clabernetesconstants.LabelTopologyOwner: owningTopologyNamespacedName.Name, + clabernetesconstants.LabelTopologyKind: r.owningTopologyKind, } for k, v := range globalLabels { - configMap.Labels[k] = v + labels[k] = v } + data := make(map[string]string) + for nodeName, nodeTopo := range clabernetesConfigs { // always initialize the tunnels keys in the configmap, this way we don't have to have any // special handling for no tunnels and things always look consistent; we'll override this // down below if the node has tunnels of course! - configMap.Data[fmt.Sprintf("%s-tunnels", nodeName)] = "" + data[fmt.Sprintf("%s-tunnels", nodeName)] = "" yamlNodeTopo, err := yaml.Marshal(nodeTopo) if err != nil { return nil, err } - configMap.Data[nodeName] = string(yamlNodeTopo) + data[nodeName] = string(yamlNodeTopo) } for nodeName, nodeTunnels := range tunnels { @@ -91,10 +84,18 @@ func (r *ConfigMapReconciler) Render( return nil, err } - configMap.Data[fmt.Sprintf("%s-tunnels", nodeName)] = string(yamlNodeTunnels) + data[fmt.Sprintf("%s-tunnels", nodeName)] = string(yamlNodeTunnels) } - return configMap, nil + return &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: owningTopologyNamespacedName.Name, + Namespace: owningTopologyNamespacedName.Namespace, + Annotations: annotations, + Labels: labels, + }, + Data: data, + }, nil } // Conforms checks if the existingConfigMap conforms with the renderedConfigMap. 
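For readers following the Conforms tests in the next file: a minimal, standalone sketch of the map-conformance rule those cases exercise. mapConforms below is an illustrative stand-in for AnnotationsOrLabelsConform (moved to util/kubernetes/meta.go in this series), not the actual implementation; the key/value pairs are taken from the test fixtures. The rule: every key/value on the rendered (expected) object must be present unchanged on the existing object, while extra keys on the existing object are tolerated.

package main

import "fmt"

// mapConforms reports whether every expected key/value from rendered is
// present, with the same value, in existing. Extra keys in existing are fine.
func mapConforms(existing, rendered map[string]string) bool {
	for expectedKey, expectedValue := range rendered {
		existingValue, ok := existing[expectedKey]
		if !ok || existingValue != expectedValue {
			return false
		}
	}

	return true
}

func main() {
	rendered := map[string]string{"user-provided-global-annotation": "expected-value"}

	existing := map[string]string{
		"user-provided-global-annotation": "expected-value",
		"someextraannotations":            "extraisok", // extra keys on the existing object are tolerated
	}

	fmt.Println(mapConforms(existing, rendered)) // true

	fmt.Println(mapConforms(map[string]string{"user-provided-global-annotation": "xyz"}, rendered)) // false: wrong value
	fmt.Println(mapConforms(map[string]string{"somethingelse": "xyz"}, rendered))                   // false: expected key missing
}

This is the same check applied to both annotations and labels by the configmap, deployment, and service reconcilers' Conforms methods, which is why the "extra-*-ok", "missing-*", and "*-wrong-value" cases recur across their test tables.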
diff --git a/controllers/topology/reconciler/configmap_test.go b/controllers/topology/reconciler/configmap_test.go index 82eb6fbb..bb26f558 100644 --- a/controllers/topology/reconciler/configmap_test.go +++ b/controllers/topology/reconciler/configmap_test.go @@ -6,6 +6,8 @@ import ( "reflect" "testing" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + claberneteslogging "github.com/srl-labs/clabernetes/logging" clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" @@ -234,3 +236,267 @@ func TestRenderConfigMap(t *testing.T) { ) } } + +func TestConfigMapConforms(t *testing.T) { + cases := []struct { + name string + existing *k8scorev1.ConfigMap + rendered *k8scorev1.ConfigMap + ownerUID apimachinerytypes.UID + conforms bool + }{ + { + name: "simple", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + { + name: "bad-data-extra-stuff", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Data: map[string]string{ + "something": "not in the expected", + }, + }, + rendered: &k8scorev1.ConfigMap{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-data-missing-stuff", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + Data: map[string]string{ + "something": "we expect expected", + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + + // annotations + + { + name: "missing-clabernetes-global-annotations", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelse": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-annotations-wrong-value", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-annotations-ok", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelseentirely": "thisisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: 
true, + }, + + // labels + + { + name: "missing-clabernetes-global-annotations", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelse": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-labels-wrong-value", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-labels-ok", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelseentirely": "thisisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + + // owner + + { + name: "bad-owner", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "multiple-owner", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewConfigMapReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + actual := reconciler.Conforms( + testCase.existing, + testCase.rendered, + testCase.ownerUID, + ) + if actual != testCase.conforms { + clabernetestesthelper.FailOutput(t, testCase.existing, testCase.rendered) + } + }) + } +} diff --git a/controllers/topology/reconciler/deployment.go b/controllers/topology/reconciler/deployment.go index 71614218..b808d638 100644 --- a/controllers/topology/reconciler/deployment.go +++ b/controllers/topology/reconciler/deployment.go @@ -51,8 +51,8 @@ func (r *DeploymentReconciler) Resolve( ownedDeployments *k8sappsv1.DeploymentList, clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, _ clabernetesapistopologyv1alpha1.TopologyCommonObject, -) (*clabernetesutilkubernetes.ObjectDiffer[*k8sappsv1.Deployment], error) { - deployments := &clabernetesutilkubernetes.ObjectDiffer[*k8sappsv1.Deployment]{ +) (*clabernetesutil.ObjectDiffer[*k8sappsv1.Deployment], 
error) { + deployments := &clabernetesutil.ObjectDiffer[*k8sappsv1.Deployment]{ Current: map[string]*k8sappsv1.Deployment{}, } @@ -307,8 +307,8 @@ func (r *DeploymentReconciler) renderDeploymentContainerEnv( } if len(owningTopologyCommonSpec.InsecureRegistries) > 0 { - deployment.Spec.Template.Spec.Containers[0].Env = append( - deployment.Spec.Template.Spec.Containers[0].Env, + envs = append( + envs, k8scorev1.EnvVar{ Name: clabernetesconstants.LauncherInsecureRegistries, Value: strings.Join(owningTopologyCommonSpec.InsecureRegistries, ","), diff --git a/controllers/topology/reconciler/deployment_test.go b/controllers/topology/reconciler/deployment_test.go index a37b7da9..2661fd4d 100644 --- a/controllers/topology/reconciler/deployment_test.go +++ b/controllers/topology/reconciler/deployment_test.go @@ -27,6 +27,297 @@ import ( const renderDeploymentTestName = "deployment/render-deployment" +func TestRenderDeployment(t *testing.T) { + cases := []struct { + name string + owningTopologyObject clabernetesapistopologyv1alpha1.TopologyCommonObject + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config + nodeName string + }{ + { + name: "simple", + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-deployment-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: []string{ + "21022:22/tcp", + "21023:23/tcp", + "21161:161/udp", + "33333:57400/tcp", + "60000:21/tcp", + "60001:80/tcp", + "60002:443/tcp", + "60003:830/tcp", + "60004:5000/tcp", + "60005:5900/tcp", + "60006:6030/tcp", + "60007:9339/tcp", + "60008:9340/tcp", + "60009:9559/tcp", + }, + }, + Kinds: nil, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + Image: "ghcr.io/nokia/srlinux", + }, + }, + Links: nil, + }, + Debug: false, + }, + }, + nodeName: "srl1", + }, + { + name: "containerlab-debug", + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-deployment-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{ + ContainerlabDebug: true, + }, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: []string{ + "21022:22/tcp", + "21023:23/tcp", + "21161:161/udp", + "33333:57400/tcp", + "60000:21/tcp", + "60001:80/tcp", + "60002:443/tcp", + "60003:830/tcp", + "60004:5000/tcp", + "60005:5900/tcp", + "60006:6030/tcp", + "60007:9339/tcp", + "60008:9340/tcp", + "60009:9559/tcp", + }, + }, + Kinds: nil, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + Image: "ghcr.io/nokia/srlinux", + }, + }, + Links: nil, + }, 
+ Debug: false, + }, + }, + nodeName: "srl1", + }, + { + name: "launcher-log-level", + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-deployment-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{ + LauncherLogLevel: "debug", + }, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux + `, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: []string{ + "21022:22/tcp", + "21023:23/tcp", + "21161:161/udp", + "33333:57400/tcp", + "60000:21/tcp", + "60001:80/tcp", + "60002:443/tcp", + "60003:830/tcp", + "60004:5000/tcp", + "60005:5900/tcp", + "60006:6030/tcp", + "60007:9339/tcp", + "60008:9340/tcp", + "60009:9559/tcp", + }, + }, + Kinds: nil, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + Image: "ghcr.io/nokia/srlinux", + }, + }, + Links: nil, + }, + Debug: false, + }, + }, + nodeName: "srl1", + }, + { + name: "insecure-registries", + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-deployment-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{ + InsecureRegistries: []string{"1.2.3.4", "potato.com"}, + }, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux + `, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: []string{ + "21022:22/tcp", + "21023:23/tcp", + "21161:161/udp", + "33333:57400/tcp", + "60000:21/tcp", + "60001:80/tcp", + "60002:443/tcp", + "60003:830/tcp", + "60004:5000/tcp", + "60005:5900/tcp", + "60006:6030/tcp", + "60007:9339/tcp", + "60008:9340/tcp", + "60009:9559/tcp", + }, + }, + Kinds: nil, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + Image: "ghcr.io/nokia/srlinux", + }, + }, + Links: nil, + }, + Debug: false, + }, + }, + nodeName: "srl1", + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewDeploymentReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got := reconciler.Render( + testCase.owningTopologyObject, + testCase.clabernetesConfigs, + testCase.nodeName, + ) + + if *clabernetestesthelper.Update { + clabernetestesthelper.WriteTestFixtureJSON( + t, + fmt.Sprintf("golden/%s/%s.json", renderDeploymentTestName, testCase.name), + got, + ) + } + + var want k8sappsv1.Deployment + + err := json.Unmarshal( + clabernetestesthelper.ReadTestFixtureFile( + t, + fmt.Sprintf("golden/%s/%s.json", renderDeploymentTestName, testCase.name), + ), + &want, + ) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(got.Annotations, want.Annotations) { + 
clabernetestesthelper.FailOutput(t, got.Annotations, want.Annotations) + } + if !reflect.DeepEqual(got.Labels, want.Labels) { + clabernetestesthelper.FailOutput(t, got.Labels, want.Labels) + } + if !reflect.DeepEqual(got.Spec, want.Spec) { + clabernetestesthelper.FailOutput(t, got.Spec, want.Spec) + } + }) + } +} + func TestDeploymentConforms(t *testing.T) { cases := []struct { name string @@ -585,120 +876,3 @@ func TestDeploymentConforms(t *testing.T) { }) } } - -func TestRenderDeployment(t *testing.T) { - cases := []struct { - name string - obj clabernetesapistopologyv1alpha1.TopologyCommonObject - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config - nodeName string - }{ - { - name: "simple", - obj: &clabernetesapistopologyv1alpha1.Containerlab{ - ObjectMeta: metav1.ObjectMeta{ - Name: "render-deployment-test", - Namespace: "clabernetes", - }, - Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ - TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, - Config: `--- - name: test - topology: - nodes: - srl1: - kind: srl - image: ghcr.io/nokia/srlinux -`, - }, - }, - clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ - "srl1": { - Name: "srl1", - Prefix: clabernetesutil.ToPointer(""), - Topology: &clabernetesutilcontainerlab.Topology{ - Defaults: &clabernetesutilcontainerlab.NodeDefinition{ - Ports: []string{ - "21022:22/tcp", - "21023:23/tcp", - "21161:161/udp", - "33333:57400/tcp", - "60000:21/tcp", - "60001:80/tcp", - "60002:443/tcp", - "60003:830/tcp", - "60004:5000/tcp", - "60005:5900/tcp", - "60006:6030/tcp", - "60007:9339/tcp", - "60008:9340/tcp", - "60009:9559/tcp", - }, - }, - Kinds: nil, - Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ - "srl1": { - Kind: "srl", - Image: "ghcr.io/nokia/srlinux", - }, - }, - Links: nil, - }, - Debug: false, - }, - }, - nodeName: "srl1", - }, - } - - for _, testCase := range cases { - t.Run( - testCase.name, - func(t *testing.T) { - t.Logf("%s: starting", testCase.name) - - reconciler := clabernetescontrollerstopologyreconciler.NewDeploymentReconciler( - &claberneteslogging.FakeInstance{}, - clabernetesapistopology.Containerlab, - clabernetesconfig.GetFakeManager, - ) - - got := reconciler.Render( - testCase.obj, - testCase.clabernetesConfigs, - testCase.nodeName, - ) - - if *clabernetestesthelper.Update { - clabernetestesthelper.WriteTestFixtureJSON( - t, - fmt.Sprintf("golden/%s/%s.json", renderDeploymentTestName, testCase.name), - got, - ) - } - - var want k8sappsv1.Deployment - - err := json.Unmarshal( - clabernetestesthelper.ReadTestFixtureFile( - t, - fmt.Sprintf("golden/%s/%s.json", renderDeploymentTestName, testCase.name), - ), - &want, - ) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(got.Annotations, want.Annotations) { - clabernetestesthelper.FailOutput(t, got.Annotations, want.Annotations) - } - if !reflect.DeepEqual(got.Labels, want.Labels) { - clabernetestesthelper.FailOutput(t, got.Labels, want.Labels) - } - if !reflect.DeepEqual(got.Spec, want.Spec) { - clabernetestesthelper.FailOutput(t, got.Spec, want.Spec) - } - }) - } -} diff --git a/controllers/topology/reconciler/reconciler.go b/controllers/topology/reconciler/reconciler.go index ce0c0ea2..26dfcc2b 100644 --- a/controllers/topology/reconciler/reconciler.go +++ b/controllers/topology/reconciler/reconciler.go @@ -6,8 +6,6 @@ import ( "slices" "time" - clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" - clabernetesconstants "github.com/srl-labs/clabernetes/constants" 
k8sappsv1 "k8s.io/api/apps/v1" @@ -145,7 +143,7 @@ func (r *Reconciler) reconcileDeploymentsHandleRestarts( owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, previousClabernetesConfigs, currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - deployments *clabernetesutilkubernetes.ObjectDiffer[*k8sappsv1.Deployment], + deployments *clabernetesutil.ObjectDiffer[*k8sappsv1.Deployment], ) error { r.Log.Info("determining nodes needing restart") diff --git a/controllers/topology/reconciler/resolve.go b/controllers/topology/reconciler/resolve.go index 13fda9a4..4b401c97 100644 --- a/controllers/topology/reconciler/resolve.go +++ b/controllers/topology/reconciler/resolve.go @@ -3,9 +3,9 @@ package reconciler import ( "context" - clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesutil "github.com/srl-labs/clabernetes/util" - clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" clabernetesconstants "github.com/srl-labs/clabernetes/constants" clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" @@ -24,8 +24,8 @@ func reconcileResolve[T ctrlruntimeclient.Object, TL ctrlruntimeclient.ObjectLis ownedObject TL, currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, - ) (*clabernetesutilkubernetes.ObjectDiffer[T], error), -) (*clabernetesutilkubernetes.ObjectDiffer[T], error) { + ) (*clabernetesutil.ObjectDiffer[T], error), +) (*clabernetesutil.ObjectDiffer[T], error) { // strictly passed for typing reasons _ = ownedType diff --git a/controllers/topology/reconciler/serviceexpose.go b/controllers/topology/reconciler/serviceexpose.go index e73b5029..ed9d1db7 100644 --- a/controllers/topology/reconciler/serviceexpose.go +++ b/controllers/topology/reconciler/serviceexpose.go @@ -16,7 +16,6 @@ import ( clabernetesconstants "github.com/srl-labs/clabernetes/constants" claberneteserrors "github.com/srl-labs/clabernetes/errors" clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" k8scorev1 "k8s.io/api/core/v1" apimachinerytypes "k8s.io/apimachinery/pkg/types" ) @@ -50,8 +49,8 @@ func (r *ServiceExposeReconciler) Resolve( ownedServices *k8scorev1.ServiceList, clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, -) (*clabernetesutilkubernetes.ObjectDiffer[*k8scorev1.Service], error) { - services := &clabernetesutilkubernetes.ObjectDiffer[*k8scorev1.Service]{ +) (*clabernetesutil.ObjectDiffer[*k8scorev1.Service], error) { + services := &clabernetesutil.ObjectDiffer[*k8scorev1.Service]{ Current: map[string]*k8scorev1.Service{}, } diff --git a/controllers/topology/reconciler/serviceexpose_test.go b/controllers/topology/reconciler/serviceexpose_test.go new file mode 100644 index 00000000..681ff6cf --- /dev/null +++ b/controllers/topology/reconciler/serviceexpose_test.go @@ -0,0 +1,184 @@ +package reconciler_test + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + 
clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" + claberneteslogging "github.com/srl-labs/clabernetes/logging" + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + clabernetesutil "github.com/srl-labs/clabernetes/util" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + k8scorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const renderServiceExposeTestName = "serviceexpose/render-service" + +func TestRenderServiceExpose(t *testing.T) { + cases := []struct { + name string + owningTopologyObject clabernetesapistopologyv1alpha1.TopologyCommonObject + owningTopologyStatus *clabernetesapistopologyv1alpha1.TopologyStatus + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config + nodeName string + }{ + { + name: "simple", + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-service-expose-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + owningTopologyStatus: &clabernetesapistopologyv1alpha1.TopologyStatus{ + Tunnels: map[string][]*clabernetesapistopologyv1alpha1.Tunnel{}, + NodeExposedPorts: map[string]*clabernetesapistopologyv1alpha1.ExposedPorts{}, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: []string{ + "21022:22/tcp", + "21023:23/tcp", + "21161:161/udp", + "33333:57400/tcp", + "60000:21/tcp", + "60001:80/tcp", + "60002:443/tcp", + "60003:830/tcp", + "60004:5000/tcp", + "60005:5900/tcp", + "60006:6030/tcp", + "60007:9339/tcp", + "60008:9340/tcp", + "60009:9559/tcp", + }, + }, + Kinds: nil, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + Image: "ghcr.io/nokia/srlinux", + }, + }, + Links: nil, + }, + Debug: false, + }, + }, + nodeName: "srl1", + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewServiceExposeReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got := reconciler.Render( + testCase.owningTopologyObject, + testCase.owningTopologyStatus, + testCase.clabernetesConfigs, + testCase.nodeName, + ) + + if *clabernetestesthelper.Update { + clabernetestesthelper.WriteTestFixtureJSON( + t, + fmt.Sprintf( + "golden/%s/%s.json", + renderServiceExposeTestName, + testCase.name, + ), + got, + ) + + clabernetestesthelper.WriteTestFixtureJSON( + t, + fmt.Sprintf( + "golden/%s/%s-status.json", + renderServiceExposeTestName, + testCase.name, + ), + testCase.owningTopologyStatus, + ) + } + + var want k8scorev1.Service + + err := json.Unmarshal( + clabernetestesthelper.ReadTestFixtureFile( + t, + fmt.Sprintf( + "golden/%s/%s.json", + renderServiceExposeTestName, + testCase.name, + ), + ), + &want, + ) + if err != nil { + t.Fatal(err) + } + + var wantStatus *clabernetesapistopologyv1alpha1.TopologyStatus + + err = json.Unmarshal( + clabernetestesthelper.ReadTestFixtureFile( + 
t, + fmt.Sprintf( + "golden/%s/%s-status.json", + renderServiceExposeTestName, + testCase.name, + ), + ), + &wantStatus, + ) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(got.Annotations, want.Annotations) { + clabernetestesthelper.FailOutput(t, got.Annotations, want.Annotations) + } + if !reflect.DeepEqual(got.Labels, want.Labels) { + clabernetestesthelper.FailOutput(t, got.Labels, want.Labels) + } + if !reflect.DeepEqual(got.Spec, want.Spec) { + clabernetestesthelper.FailOutput(t, got.Spec, want.Spec) + } + + // also check that the status got updated properly + if !reflect.DeepEqual(testCase.owningTopologyStatus, wantStatus) { + clabernetestesthelper.FailOutput(t, testCase.owningTopologyStatus, wantStatus) + } + }) + } +} diff --git a/controllers/topology/reconciler/servicefabric.go b/controllers/topology/reconciler/servicefabric.go index 94bbdc3f..e024a8b3 100644 --- a/controllers/topology/reconciler/servicefabric.go +++ b/controllers/topology/reconciler/servicefabric.go @@ -3,6 +3,8 @@ package reconciler import ( "fmt" + clabernetesutil "github.com/srl-labs/clabernetes/util" + claberneteslogging "github.com/srl-labs/clabernetes/logging" clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" @@ -10,7 +12,6 @@ import ( clabernetesconstants "github.com/srl-labs/clabernetes/constants" claberneteserrors "github.com/srl-labs/clabernetes/errors" clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" k8scorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apimachinerytypes "k8s.io/apimachinery/pkg/types" @@ -46,8 +47,8 @@ func (r *ServiceFabricReconciler) Resolve( ownedServices *k8scorev1.ServiceList, clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, _ clabernetesapistopologyv1alpha1.TopologyCommonObject, -) (*clabernetesutilkubernetes.ObjectDiffer[*k8scorev1.Service], error) { - services := &clabernetesutilkubernetes.ObjectDiffer[*k8scorev1.Service]{ +) (*clabernetesutil.ObjectDiffer[*k8scorev1.Service], error) { + services := &clabernetesutil.ObjectDiffer[*k8scorev1.Service]{ Current: map[string]*k8scorev1.Service{}, } diff --git a/controllers/topology/reconciler/servicefabric_test.go b/controllers/topology/reconciler/servicefabric_test.go new file mode 100644 index 00000000..85aea31d --- /dev/null +++ b/controllers/topology/reconciler/servicefabric_test.go @@ -0,0 +1,107 @@ +package reconciler_test + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" + claberneteslogging "github.com/srl-labs/clabernetes/logging" + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + k8scorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const renderServiceFabricTestName = "servicefabric/render-service" + +func TestRenderServiceFabric(t *testing.T) { + cases := []struct { + name string + owningTopologyObject clabernetesapistopologyv1alpha1.TopologyCommonObject + nodeName string + }{ + { + name: "simple", + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-service-fabric-test", + Namespace:
"clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + nodeName: "srl1", + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewServiceFabricReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got := reconciler.Render( + testCase.owningTopologyObject, + testCase.nodeName, + ) + + if *clabernetestesthelper.Update { + clabernetestesthelper.WriteTestFixtureJSON( + t, + fmt.Sprintf( + "golden/%s/%s.json", + renderServiceFabricTestName, + testCase.name, + ), + got, + ) + } + + var want k8scorev1.Service + + err := json.Unmarshal( + clabernetestesthelper.ReadTestFixtureFile( + t, + fmt.Sprintf( + "golden/%s/%s.json", + renderServiceFabricTestName, + testCase.name, + ), + ), + &want, + ) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(got.Annotations, want.Annotations) { + clabernetestesthelper.FailOutput(t, got.Annotations, want.Annotations) + } + if !reflect.DeepEqual(got.Labels, want.Labels) { + clabernetestesthelper.FailOutput(t, got.Labels, want.Labels) + } + if !reflect.DeepEqual(got.Spec, want.Spec) { + clabernetestesthelper.FailOutput(t, got.Spec, want.Spec) + } + }) + } +} diff --git a/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/containerlab-debug.json b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/containerlab-debug.json new file mode 100755 index 00000000..68960e9f --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/containerlab-debug.json @@ -0,0 +1,102 @@ +{ + "metadata": { + "name": "render-deployment-test-srl1", + "namespace": "clabernetes", + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "volumes": [ + { + "name": "render-deployment-test-config", + "configMap": { + "name": "render-deployment-test" + } + } + ], + "containers": [ + { + "name": "srl1", + "image": "ghcr.io/srl-labs/clabernetes/clabernetes-launcher:latest", + "command": [ + "/clabernetes/manager", + "launch" + ], + "workingDir": "/clabernetes", + "ports": [ + { + "name": "vxlan", + "containerPort": 14789, + "protocol": "UDP" + } + ], + "env": [ + { + "name": "LAUNCHER_LOGGER_LEVEL", + "value": "info" + }, + { + "name": "LAUNCHER_CONTAINERLAB_DEBUG", + "value": "true" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": 
"render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/topo.clab.yaml", + "subPath": "srl1" + }, + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/tunnels.yaml", + "subPath": "srl1-tunnels" + } + ], + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "imagePullPolicy": "IfNotPresent", + "securityContext": { + "privileged": true, + "runAsUser": 0 + } + } + ], + "restartPolicy": "Always", + "serviceAccountName": "default" + } + }, + "strategy": {}, + "revisionHistoryLimit": 0 + }, + "status": {} +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/insecure-registries.json b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/insecure-registries.json new file mode 100755 index 00000000..55840dcb --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/insecure-registries.json @@ -0,0 +1,102 @@ +{ + "metadata": { + "name": "render-deployment-test-srl1", + "namespace": "clabernetes", + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "volumes": [ + { + "name": "render-deployment-test-config", + "configMap": { + "name": "render-deployment-test" + } + } + ], + "containers": [ + { + "name": "srl1", + "image": "ghcr.io/srl-labs/clabernetes/clabernetes-launcher:latest", + "command": [ + "/clabernetes/manager", + "launch" + ], + "workingDir": "/clabernetes", + "ports": [ + { + "name": "vxlan", + "containerPort": 14789, + "protocol": "UDP" + } + ], + "env": [ + { + "name": "LAUNCHER_LOGGER_LEVEL", + "value": "info" + }, + { + "name": "LAUNCHER_INSECURE_REGISTRIES", + "value": "1.2.3.4,potato.com" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/topo.clab.yaml", + "subPath": "srl1" + }, + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/tunnels.yaml", + "subPath": "srl1-tunnels" + } + ], + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "imagePullPolicy": "IfNotPresent", + "securityContext": { + "privileged": true, + "runAsUser": 0 + } + } + ], + "restartPolicy": "Always", + "serviceAccountName": "default" + } + }, + "strategy": {}, + "revisionHistoryLimit": 0 + }, + "status": {} +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/launcher-log-level.json b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/launcher-log-level.json new file mode 100755 index 
00000000..07eba9a8 --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/launcher-log-level.json @@ -0,0 +1,98 @@ +{ + "metadata": { + "name": "render-deployment-test-srl1", + "namespace": "clabernetes", + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "volumes": [ + { + "name": "render-deployment-test-config", + "configMap": { + "name": "render-deployment-test" + } + } + ], + "containers": [ + { + "name": "srl1", + "image": "ghcr.io/srl-labs/clabernetes/clabernetes-launcher:latest", + "command": [ + "/clabernetes/manager", + "launch" + ], + "workingDir": "/clabernetes", + "ports": [ + { + "name": "vxlan", + "containerPort": 14789, + "protocol": "UDP" + } + ], + "env": [ + { + "name": "LAUNCHER_LOGGER_LEVEL", + "value": "debug" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/topo.clab.yaml", + "subPath": "srl1" + }, + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/tunnels.yaml", + "subPath": "srl1-tunnels" + } + ], + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "imagePullPolicy": "IfNotPresent", + "securityContext": { + "privileged": true, + "runAsUser": 0 + } + } + ], + "restartPolicy": "Always", + "serviceAccountName": "default" + } + }, + "strategy": {}, + "revisionHistoryLimit": 0 + }, + "status": {} +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple-status.json b/controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple-status.json new file mode 100755 index 00000000..fb411a8e --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple-status.json @@ -0,0 +1,30 @@ +{ + "configs": "", + "configsHash": "", + "tunnels": {}, + "tunnelsHash": "", + "nodeExposedPorts": { + "srl1": { + "loadBalancerAddress": "", + "tcpPorts": [ + 22, + 23, + 57400, + 21, + 80, + 443, + 830, + 5000, + 5900, + 6030, + 9339, + 9340, + 9559 + ], + "udpPorts": [ + 161 + ] + } + }, + "nodeExposedPortsHash": "" +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple.json b/controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple.json new file mode 100755 index 00000000..417f7587 --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple.json @@ -0,0 +1,113 @@ +{ + "metadata": { + "name": "render-service-expose-test-srl1", + "namespace": "clabernetes", + 
"creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-service-expose-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-service-expose-test", + "clabernetes/topologyServiceType": "expose" + } + }, + "spec": { + "ports": [ + { + "name": "port-22-tcp", + "protocol": "TCP", + "port": 22, + "targetPort": 21022 + }, + { + "name": "port-23-tcp", + "protocol": "TCP", + "port": 23, + "targetPort": 21023 + }, + { + "name": "port-161-udp", + "protocol": "UDP", + "port": 161, + "targetPort": 21161 + }, + { + "name": "port-57400-tcp", + "protocol": "TCP", + "port": 57400, + "targetPort": 33333 + }, + { + "name": "port-21-tcp", + "protocol": "TCP", + "port": 21, + "targetPort": 60000 + }, + { + "name": "port-80-tcp", + "protocol": "TCP", + "port": 80, + "targetPort": 60001 + }, + { + "name": "port-443-tcp", + "protocol": "TCP", + "port": 443, + "targetPort": 60002 + }, + { + "name": "port-830-tcp", + "protocol": "TCP", + "port": 830, + "targetPort": 60003 + }, + { + "name": "port-5000-tcp", + "protocol": "TCP", + "port": 5000, + "targetPort": 60004 + }, + { + "name": "port-5900-tcp", + "protocol": "TCP", + "port": 5900, + "targetPort": 60005 + }, + { + "name": "port-6030-tcp", + "protocol": "TCP", + "port": 6030, + "targetPort": 60006 + }, + { + "name": "port-9339-tcp", + "protocol": "TCP", + "port": 9339, + "targetPort": 60007 + }, + { + "name": "port-9340-tcp", + "protocol": "TCP", + "port": 9340, + "targetPort": 60008 + }, + { + "name": "port-9559-tcp", + "protocol": "TCP", + "port": 9559, + "targetPort": 60009 + } + ], + "selector": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-service-expose-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-service-expose-test" + }, + "type": "LoadBalancer" + }, + "status": { + "loadBalancer": {} + } +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/servicefabric/render-service/simple.json b/controllers/topology/reconciler/test-fixtures/golden/servicefabric/render-service/simple.json new file mode 100755 index 00000000..9b97b36c --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/servicefabric/render-service/simple.json @@ -0,0 +1,35 @@ +{ + "metadata": { + "name": "render-service-fabric-test-srl1-vx", + "namespace": "clabernetes", + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-service-fabric-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-service-fabric-test", + "clabernetes/topologyServiceType": "fabric" + } + }, + "spec": { + "ports": [ + { + "name": "vxlan", + "protocol": "UDP", + "port": 14789, + "targetPort": 14789 + } + ], + "selector": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-service-fabric-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-service-fabric-test" + }, + "type": "ClusterIP" + }, + "status": { + "loadBalancer": {} + } +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/meshy-links.json b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/meshy-links.json deleted file mode 100644 index 7977c5be..00000000 --- 
a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/meshy-links.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "srl1": [ - { - "id": 1, - "localNodeName": "srl1", - "remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", - "remoteNodeName": "srl2", - "localLinkName": "e1-1", - "remoteLinkName": "e1-1" - }, - { - "id": 2, - "localNodeName": "srl1", - "remoteName": "topo-1-srl3.clabernetes.svc.cluster.local", - "remoteNodeName": "srl3", - "localLinkName": "e1-2", - "remoteLinkName": "e1-1" - } - ], - "srl2": [ - { - "id": 1, - "localNodeName": "srl2", - "remoteName": "topo-1-srl1.clabernetes.svc.cluster.local", - "remoteNodeName": "srl1", - "localLinkName": "e1-1", - "remoteLinkName": "e1-1" - }, - { - "id": 3, - "localNodeName": "srl2", - "remoteName": "topo-1-srl3.clabernetes.svc.cluster.local", - "remoteNodeName": "srl3", - "localLinkName": "e1-2", - "remoteLinkName": "e1-2" - }, - { - "id": 4, - "localNodeName": "srl2", - "remoteName": "topo-1-srl4.clabernetes.svc.cluster.local", - "remoteNodeName": "srl4", - "localLinkName": "e1-3", - "remoteLinkName": "e1-1" - } - ], - "srl3": [ - { - "id": 2, - "localNodeName": "srl3", - "remoteName": "topo-1-srl1.clabernetes.svc.cluster.local", - "remoteNodeName": "srl1", - "localLinkName": "e1-1", - "remoteLinkName": "e1-2" - }, - { - "id": 3, - "localNodeName": "srl3", - "remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", - "remoteNodeName": "srl2", - "localLinkName": "e1-2", - "remoteLinkName": "e1-2" - }, - { - "id": 5, - "localNodeName": "srl3", - "remoteName": "topo-1-srl4.clabernetes.svc.cluster.local", - "remoteNodeName": "srl4", - "localLinkName": "e1-3", - "remoteLinkName": "e1-2" - } - ], - "srl4": [ - { - "id": 4, - "localNodeName": "srl4", - "remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", - "remoteNodeName": "srl2", - "localLinkName": "e1-1", - "remoteLinkName": "e1-3" - }, - { - "id": 5, - "localNodeName": "srl4", - "remoteName": "topo-1-srl3.clabernetes.svc.cluster.local", - "remoteNodeName": "srl3", - "localLinkName": "e1-2", - "remoteLinkName": "e1-3" - } - ] -} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-already-allocated-ids.json b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-already-allocated-ids.json deleted file mode 100644 index 96581703..00000000 --- a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-already-allocated-ids.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "srl1": [ - { - "id": 1, - "localNodeName": "srl1", - "remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", - "remoteNodeName": "srl2", - "localLinkName": "e1-1", - "remoteLinkName": "e1-1" - } - ], - "srl2": [ - { - "id": 1, - "localNodeName": "srl2", - "remoteName": "topo-1-srl1.clabernetes.svc.cluster.local", - "remoteNodeName": "srl1", - "localLinkName": "e1-1", - "remoteLinkName": "e1-1" - } - ] -} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-existing-status.json b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-existing-status.json deleted file mode 100644 index 96581703..00000000 --- a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-existing-status.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "srl1": [ - { - "id": 1, - "localNodeName": "srl1", - "remoteName": 
"topo-1-srl2.clabernetes.svc.cluster.local", - "remoteNodeName": "srl2", - "localLinkName": "e1-1", - "remoteLinkName": "e1-1" - } - ], - "srl2": [ - { - "id": 1, - "localNodeName": "srl2", - "remoteName": "topo-1-srl1.clabernetes.svc.cluster.local", - "remoteNodeName": "srl1", - "localLinkName": "e1-1", - "remoteLinkName": "e1-1" - } - ] -} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-weirdly-allocated-ids.json b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-weirdly-allocated-ids.json deleted file mode 100644 index 96581703..00000000 --- a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple-weirdly-allocated-ids.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "srl1": [ - { - "id": 1, - "localNodeName": "srl1", - "remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", - "remoteNodeName": "srl2", - "localLinkName": "e1-1", - "remoteLinkName": "e1-1" - } - ], - "srl2": [ - { - "id": 1, - "localNodeName": "srl2", - "remoteName": "topo-1-srl1.clabernetes.svc.cluster.local", - "remoteNodeName": "srl1", - "localLinkName": "e1-1", - "remoteLinkName": "e1-1" - } - ] -} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple.json b/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple.json deleted file mode 100644 index 96581703..00000000 --- a/controllers/topology/reconciler/test-fixtures/golden/tunnels/allocate-tunnel-ids/simple.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "srl1": [ - { - "id": 1, - "localNodeName": "srl1", - "remoteName": "topo-1-srl2.clabernetes.svc.cluster.local", - "remoteNodeName": "srl2", - "localLinkName": "e1-1", - "remoteLinkName": "e1-1" - } - ], - "srl2": [ - { - "id": 1, - "localNodeName": "srl2", - "remoteName": "topo-1-srl1.clabernetes.svc.cluster.local", - "remoteNodeName": "srl1", - "localLinkName": "e1-1", - "remoteLinkName": "e1-1" - } - ] -} \ No newline at end of file diff --git a/util/kubernetes/containers_test.go b/util/kubernetes/containers_test.go new file mode 100644 index 00000000..1e0d5f61 --- /dev/null +++ b/util/kubernetes/containers_test.go @@ -0,0 +1,62 @@ +package kubernetes_test + +import ( + "testing" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + k8scorev1 "k8s.io/api/core/v1" +) + +func TestContainersEqual(t *testing.T) { + cases := []struct { + name string + a []k8scorev1.Container + b []k8scorev1.Container + expected bool + }{ + { + name: "simple-empty", + a: []k8scorev1.Container{}, + b: []k8scorev1.Container{}, + expected: true, + }, + { + name: "simple", + a: []k8scorev1.Container{ + { + Name: "something", + }, + }, + b: []k8scorev1.Container{ + { + Name: "something", + }, + }, + expected: true, + }, + { + name: "different-counts", + a: []k8scorev1.Container{ + { + Name: "something", + }, + }, + b: []k8scorev1.Container{}, + expected: false, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + actual := clabernetesutilkubernetes.ContainersEqual(testCase.a, testCase.b) + if actual != testCase.expected { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +} diff --git a/util/kubernetes/meta_test.go b/util/kubernetes/meta_test.go new file mode 100644 index 
00000000..cf1d6e32 --- /dev/null +++ b/util/kubernetes/meta_test.go @@ -0,0 +1,82 @@ +package kubernetes_test + +import ( + "testing" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" +) + +func TestContainersEqualAnnotationsOrLabelsConform(t *testing.T) { + cases := []struct { + name string + a map[string]string + b map[string]string + expected bool + }{ + { + name: "simple-empty", + a: nil, + b: nil, + expected: true, + }, + { + name: "simple", + a: map[string]string{ + "something": "neat", + }, + b: map[string]string{ + "something": "neat", + }, + expected: true, + }, + { + name: "different-keys", + a: map[string]string{ + "something": "neat", + }, + b: map[string]string{ + "different": "neat", + }, + expected: false, + }, + { + name: "expected-has-more", + a: map[string]string{ + "something": "neat", + }, + b: map[string]string{ + "something": "neat", + "different": "neat", + }, + expected: false, + }, + { + name: "existing-has-more", + a: map[string]string{ + "something": "neat", + "different": "neat", + }, + b: map[string]string{ + "something": "neat", + }, + expected: true, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + actual := clabernetesutilkubernetes.AnnotationsOrLabelsConform( + testCase.a, + testCase.b, + ) + if actual != testCase.expected { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +} diff --git a/util/kubernetes/names_test.go b/util/kubernetes/names_test.go index da79c67e..616257ac 100644 --- a/util/kubernetes/names_test.go +++ b/util/kubernetes/names_test.go @@ -35,13 +35,17 @@ func TestSafeConcatNameKubernetes(t *testing.T) { }, } - for _, tc := range cases { - t.Logf("%s: starting", tc.name) + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) - actual := clabernetesutilkubernetes.SafeConcatNameKubernetes(tc.in...) - if actual != tc.expected { - clabernetestesthelper.FailOutput(t, actual, tc.expected) - } + actual := clabernetesutilkubernetes.SafeConcatNameKubernetes(testCase.in...) 
+ if actual != testCase.expected { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) } } @@ -77,12 +81,16 @@ func TestSafeConcatNameMax(t *testing.T) { }, } - for _, tc := range cases { - t.Logf("%s: starting", tc.name) + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) - actual := clabernetesutilkubernetes.SafeConcatNameMax(tc.in, tc.max) - if actual != tc.expected { - clabernetestesthelper.FailOutput(t, actual, tc.expected) - } + actual := clabernetesutilkubernetes.SafeConcatNameMax(testCase.in, testCase.max) + if actual != testCase.expected { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) } } diff --git a/util/kubernetes/volumes_test.go b/util/kubernetes/volumes_test.go new file mode 100644 index 00000000..bdeb4e52 --- /dev/null +++ b/util/kubernetes/volumes_test.go @@ -0,0 +1 @@ +package kubernetes_test diff --git a/util/kubernetes/objectdiffer.go b/util/objectdiffer.go similarity index 79% rename from util/kubernetes/objectdiffer.go rename to util/objectdiffer.go index db873a60..ca03ef70 100644 --- a/util/kubernetes/objectdiffer.go +++ b/util/objectdiffer.go @@ -1,13 +1,8 @@ -package kubernetes - -import ( - clabernetesutil "github.com/srl-labs/clabernetes/util" - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" -) +package util // ObjectDiffer holds objets of type T -- used for comparing current, missing, and extraneous // objects in the cluster. -type ObjectDiffer[T ctrlruntimeclient.Object] struct { +type ObjectDiffer[T any] struct { // Current objects by endpoint name Current map[string]T // Missing objects by endpoint name @@ -33,7 +28,7 @@ func (d *ObjectDiffer[T]) CurrentObjectNames() []string { // SetMissing sets the missing objects based on the slice of all expected object names. func (d *ObjectDiffer[T]) SetMissing(allExpectedNames []string) { - d.Missing = clabernetesutil.StringSliceDifference( + d.Missing = StringSliceDifference( d.CurrentObjectNames(), allExpectedNames, ) @@ -42,7 +37,7 @@ func (d *ObjectDiffer[T]) SetMissing(allExpectedNames []string) { // SetExtra sets the extra objects based on the slice of all expected object names and the current // objects -- `Current` must be set prior to calling this or things will be weird. 
func (d *ObjectDiffer[T]) SetExtra(allExpectedNames []string) { - extraNames := clabernetesutil.StringSliceDifference( + extraNames := StringSliceDifference( allExpectedNames, d.CurrentObjectNames(), ) diff --git a/util/objectdiffer_test.go b/util/objectdiffer_test.go new file mode 100644 index 00000000..ac26c41e --- /dev/null +++ b/util/objectdiffer_test.go @@ -0,0 +1,122 @@ +package util_test + +import ( + "testing" + + clabernetesutil "github.com/srl-labs/clabernetes/util" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" +) + +func TestObjectDifferGetCurrentObjectNames(t *testing.T) { + cases := []struct { + name string + current map[string]string + expected []string + }{ + { + name: "simple", + current: map[string]string{ + "one": "something", + "two": "neato", + }, + expected: []string{"one", "two"}, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + od := clabernetesutil.ObjectDiffer[string]{ + Current: testCase.current, + } + + actual := od.CurrentObjectNames() + + if !clabernetesutil.StringSliceContainsAll(actual, testCase.expected) { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +} + +func TestObjectDifferSetMissing(t *testing.T) { + cases := []struct { + name string + current map[string]string + allExpected []string + expected []string + }{ + { + name: "simple", + current: map[string]string{ + "one": "something", + "two": "neato", + }, + allExpected: []string{"one", "two", "seven", "eleven"}, + expected: []string{"seven", "eleven"}, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + od := clabernetesutil.ObjectDiffer[string]{ + Current: testCase.current, + } + + od.SetMissing(testCase.allExpected) + + actual := od.Missing + + if !clabernetesutil.StringSliceContainsAll(actual, testCase.expected) { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +} + +func TestObjectDifferSetExtra(t *testing.T) { + cases := []struct { + name string + current map[string]string + allExpected []string + expected []string + }{ + { + name: "simple", + current: map[string]string{ + "one": "something", + "two": "neato", + }, + allExpected: []string{"one", "seven", "eleven"}, + expected: []string{"neato"}, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + od := clabernetesutil.ObjectDiffer[string]{ + Current: testCase.current, + } + + od.SetExtra(testCase.allExpected) + + actual := od.Extra + + if !clabernetesutil.StringSliceContainsAll(actual, testCase.expected) { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +} From d390e57c3b1b3ee722078c836cea782d6baa4fb6 Mon Sep 17 00:00:00 2001 From: Carl Montanari Date: Sun, 22 Oct 2023 15:59:50 -0700 Subject: [PATCH 8/8] test: resolve tests --- .../topology/reconciler/deployment_test.go | 106 ++++++++++++ .../topology/reconciler/serviceexpose_test.go | 160 ++++++++++++++++++ .../topology/reconciler/servicefabric_test.go | 110 ++++++++++++ 3 files changed, 376 insertions(+) diff --git a/controllers/topology/reconciler/deployment_test.go b/controllers/topology/reconciler/deployment_test.go index 2661fd4d..857a475c 100644 --- a/controllers/topology/reconciler/deployment_test.go +++ b/controllers/topology/reconciler/deployment_test.go @@ -6,6 +6,8 @@ import ( "reflect" "testing" +
clabernetesconstants "github.com/srl-labs/clabernetes/constants" + claberneteslogging "github.com/srl-labs/clabernetes/logging" clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" @@ -27,6 +29,110 @@ import ( const renderDeploymentTestName = "deployment/render-deployment" +func TestResolveDeployment(t *testing.T) { + cases := []struct { + name string + ownedDeployments *k8sappsv1.DeploymentList + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config + expectedCurrent []string + expectedMissing []string + expectedExtra []*k8sappsv1.Deployment + }{ + { + name: "simple", + ownedDeployments: &k8sappsv1.DeploymentList{}, + clabernetesConfigs: nil, + expectedCurrent: nil, + expectedMissing: nil, + expectedExtra: []*k8sappsv1.Deployment{}, + }, + { + name: "missing-nodes", + ownedDeployments: &k8sappsv1.DeploymentList{}, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "node1": nil, + "node2": nil, + }, + expectedCurrent: nil, + expectedMissing: []string{"node1", "node2"}, + expectedExtra: []*k8sappsv1.Deployment{}, + }, + { + name: "extra-nodes", + ownedDeployments: &k8sappsv1.DeploymentList{ + Items: []k8sappsv1.Deployment{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-deployment-test", + Namespace: "clabernetes", + Labels: map[string]string{ + clabernetesconstants.LabelTopologyNode: "node2", + }, + }, + }, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "node1": nil, + }, + expectedCurrent: nil, + expectedMissing: nil, + expectedExtra: []*k8sappsv1.Deployment{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-deployment-test", + Namespace: "clabernetes", + Labels: map[string]string{ + clabernetesconstants.LabelTopologyNode: "node2", + }, + }, + }, + }, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewDeploymentReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got, err := reconciler.Resolve( + testCase.ownedDeployments, + testCase.clabernetesConfigs, + nil, + ) + if err != nil { + t.Fatal(err) + } + + var gotCurrent []string + + for current := range got.Current { + gotCurrent = append(gotCurrent, current) + } + + if !clabernetesutil.StringSliceContainsAll(gotCurrent, testCase.expectedCurrent) { + clabernetestesthelper.FailOutput(t, gotCurrent, testCase.expectedCurrent) + } + + if !clabernetesutil.StringSliceContainsAll(got.Missing, testCase.expectedMissing) { + clabernetestesthelper.FailOutput(t, got.Missing, testCase.expectedMissing) + } + + if !reflect.DeepEqual(got.Extra, testCase.expectedExtra) { + clabernetestesthelper.FailOutput(t, got.Extra, testCase.expectedExtra) + } + }) + } +} + func TestRenderDeployment(t *testing.T) { cases := []struct { name string diff --git a/controllers/topology/reconciler/serviceexpose_test.go b/controllers/topology/reconciler/serviceexpose_test.go index 681ff6cf..2c385a6c 100644 --- a/controllers/topology/reconciler/serviceexpose_test.go +++ b/controllers/topology/reconciler/serviceexpose_test.go @@ -6,6 +6,8 @@ import ( "reflect" "testing" + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" clabernetesconfig 
"github.com/srl-labs/clabernetes/config" @@ -20,6 +22,164 @@ import ( const renderServiceExposeTestName = "serviceexpose/render-service" +func TestResolveServiceExpose(t *testing.T) { + cases := []struct { + name string + ownedServices *k8scorev1.ServiceList + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config + owningTopologyObject clabernetesapistopologyv1alpha1.TopologyCommonObject + expectedCurrent []string + expectedMissing []string + expectedExtra []*k8scorev1.Service + }{ + { + name: "simple", + ownedServices: &k8scorev1.ServiceList{}, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{}, + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-deployment-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + expectedCurrent: nil, + expectedMissing: nil, + expectedExtra: []*k8scorev1.Service{}, + }, + { + name: "missing-nodes", + ownedServices: &k8scorev1.ServiceList{}, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "node1": nil, + "node2": nil, + }, + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-servicefabric-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + expectedCurrent: nil, + expectedMissing: []string{"node1", "node2"}, + expectedExtra: []*k8scorev1.Service{}, + }, + { + name: "extra-nodes", + ownedServices: &k8scorev1.ServiceList{ + Items: []k8scorev1.Service{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-servicefabric-test", + Namespace: "clabernetes", + Labels: map[string]string{ + clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeExpose, //nolint:lll + clabernetesconstants.LabelTopologyNode: "node2", + }, + }, + }, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "node1": nil, + }, + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-servicefabric-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + expectedCurrent: nil, + expectedMissing: nil, + expectedExtra: []*k8scorev1.Service{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-servicefabric-test", + Namespace: "clabernetes", + Labels: map[string]string{ + clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeExpose, //nolint:lll + clabernetesconstants.LabelTopologyNode: "node2", + }, + }, + }, + }, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewServiceExposeReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got, err := 
reconciler.Resolve( + testCase.ownedServices, + testCase.clabernetesConfigs, + testCase.owningTopologyObject, + ) + if err != nil { + t.Fatal(err) + } + + var gotCurrent []string + + for current := range got.Current { + gotCurrent = append(gotCurrent, current) + } + + if !clabernetesutil.StringSliceContainsAll(gotCurrent, testCase.expectedCurrent) { + clabernetestesthelper.FailOutput(t, gotCurrent, testCase.expectedCurrent) + } + + if !clabernetesutil.StringSliceContainsAll(got.Missing, testCase.expectedMissing) { + clabernetestesthelper.FailOutput(t, got.Missing, testCase.expectedMissing) + } + + if !reflect.DeepEqual(got.Extra, testCase.expectedExtra) { + clabernetestesthelper.FailOutput(t, got.Extra, testCase.expectedExtra) + } + }) + } +} + func TestRenderServiceExpose(t *testing.T) { cases := []struct { name string diff --git a/controllers/topology/reconciler/servicefabric_test.go b/controllers/topology/reconciler/servicefabric_test.go index 85aea31d..25710c03 100644 --- a/controllers/topology/reconciler/servicefabric_test.go +++ b/controllers/topology/reconciler/servicefabric_test.go @@ -6,6 +6,10 @@ import ( "reflect" "testing" + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + clabernetesutil "github.com/srl-labs/clabernetes/util" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" clabernetesconfig "github.com/srl-labs/clabernetes/config" @@ -18,6 +22,112 @@ import ( const renderServiceFabricTestName = "servicefabric/render-service" +func TestResolveServiceFabric(t *testing.T) { + cases := []struct { + name string + ownedServices *k8scorev1.ServiceList + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config + expectedCurrent []string + expectedMissing []string + expectedExtra []*k8scorev1.Service + }{ + { + name: "simple", + ownedServices: &k8scorev1.ServiceList{}, + clabernetesConfigs: nil, + expectedCurrent: nil, + expectedMissing: nil, + expectedExtra: []*k8scorev1.Service{}, + }, + { + name: "missing-nodes", + ownedServices: &k8scorev1.ServiceList{}, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "node1": nil, + "node2": nil, + }, + expectedCurrent: nil, + expectedMissing: []string{"node1", "node2"}, + expectedExtra: []*k8scorev1.Service{}, + }, + { + name: "extra-nodes", + ownedServices: &k8scorev1.ServiceList{ + Items: []k8scorev1.Service{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-servicefabric-test", + Namespace: "clabernetes", + Labels: map[string]string{ + clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeFabric, //nolint:lll + clabernetesconstants.LabelTopologyNode: "node2", + }, + }, + }, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "node1": nil, + }, + expectedCurrent: nil, + expectedMissing: nil, + expectedExtra: []*k8scorev1.Service{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-servicefabric-test", + Namespace: "clabernetes", + Labels: map[string]string{ + clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeFabric, //nolint:lll + clabernetesconstants.LabelTopologyNode: "node2", + }, + }, + }, + }, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := 
clabernetescontrollerstopologyreconciler.NewServiceFabricReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got, err := reconciler.Resolve( + testCase.ownedServices, + testCase.clabernetesConfigs, + nil, + ) + if err != nil { + t.Fatal(err) + } + + var gotCurrent []string + + for current := range got.Current { + gotCurrent = append(gotCurrent, current) + } + + if !clabernetesutil.StringSliceContainsAll(gotCurrent, testCase.expectedCurrent) { + clabernetestesthelper.FailOutput(t, gotCurrent, testCase.expectedCurrent) + } + + if !clabernetesutil.StringSliceContainsAll(got.Missing, testCase.expectedMissing) { + clabernetestesthelper.FailOutput(t, got.Missing, testCase.expectedMissing) + } + + if !reflect.DeepEqual(got.Extra, testCase.expectedExtra) { + clabernetestesthelper.FailOutput(t, got.Extra, testCase.expectedExtra) + } + }) + } +} + func TestRenderServiceFabric(t *testing.T) { cases := []struct { name string