diff --git a/.golangci.yaml b/.golangci.yaml index 325c037a..4f578352 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -145,6 +145,10 @@ issues: - funlen - gochecknoglobals + - path: logging/fake.go + linters: + - revive + # ignore globals for standard k8s things - linters: - gochecknoglobals diff --git a/clabverter/files.go b/clabverter/files.go index 8b81f757..e43bfa51 100644 --- a/clabverter/files.go +++ b/clabverter/files.go @@ -6,6 +6,8 @@ import ( "strings" "text/template" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + clabernetesutil "github.com/srl-labs/clabernetes/util" clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" ) @@ -262,7 +264,7 @@ func (c *Clabverter) handleExtraFiles() error { c.extraFilesConfigMaps[nodeName] = make([]topologyConfigMapTemplateVars, 0) for extraFilePath, extraFileContent := range nodeExtraFiles { - safeFileName := clabernetesutil.SafeConcatNameKubernetes( + safeFileName := clabernetesutilkubernetes.SafeConcatNameKubernetes( strings.Split(extraFilePath, "/")...) safeFileName = strings.TrimPrefix(safeFileName, "-") diff --git a/clicker/clabernetes.go b/clicker/clabernetes.go index 50244b7a..7155dfff 100644 --- a/clicker/clabernetes.go +++ b/clicker/clabernetes.go @@ -9,6 +9,8 @@ import ( "sync" "time" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + "gopkg.in/yaml.v3" claberneteserrors "github.com/srl-labs/clabernetes/errors" @@ -203,7 +205,7 @@ func (c *clabernetes) run() error { func (c *clabernetes) setup() error { var err error - c.namespace, err = clabernetesutil.CurrentNamespace() + c.namespace, err = clabernetesutilkubernetes.CurrentNamespace() if err != nil { c.logger.Criticalf("failed getting current namespace, err: %s", err) @@ -318,7 +320,7 @@ func envToResources() (k8scorev1.ResourceRequirements, error) { return out, nil } - parsedOut, err := clabernetesutil.YAMLToK8sResourceRequirements(asStr) + parsedOut, err := clabernetesutilkubernetes.YAMLToK8sResourceRequirements(asStr) if err != nil { return out, err } diff --git a/config/manager.go b/config/manager.go index e5c0777f..5568d250 100644 --- a/config/manager.go +++ b/config/manager.go @@ -25,6 +25,9 @@ var ( managerInstanceOnce sync.Once //nolint:gochecknoglobals ) +// ManagerGetterFunc returns an instance of the config manager. +type ManagerGetterFunc func() Manager + // InitManager initializes the config manager -- it does this once only, its a no-op if the manager // is already initialized. func InitManager(ctx context.Context, appName, namespace string, client *kubernetes.Clientset) { diff --git a/constants/kubernetes.go b/constants/kubernetes.go new file mode 100644 index 00000000..2c6e255c --- /dev/null +++ b/constants/kubernetes.go @@ -0,0 +1,18 @@ +package constants + +const ( + // KubernetesConfigMap is a const to use for "configmap". + KubernetesConfigMap = "configmap" + + // KubernetesService is a const to use for "service". + KubernetesService = "service" + + // KubernetesDeployment is a const to use for "deployment". + KubernetesDeployment = "deployment" + + // KubernetesServiceClusterIPType is a const to use for "ClusterIP". + KubernetesServiceClusterIPType = "ClusterIP" + + // KubernetesServiceLoadBalancerType is a const to use for "LoadBalancer". 
+ KubernetesServiceLoadBalancerType = "LoadBalancer" +) diff --git a/controllers/base.go b/controllers/base.go index 57927524..4aa456a2 100644 --- a/controllers/base.go +++ b/controllers/base.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + ctrlruntimeevent "sigs.k8s.io/controller-runtime/pkg/event" ctrlruntimepredicate "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -58,7 +60,7 @@ func NewBaseController( return &BaseController{ Ctx: ctx, AppName: appName, - ControllerNamespace: clabernetesutil.MustCurrentNamespace(), + ControllerNamespace: clabernetesutilkubernetes.MustCurrentNamespace(), Log: logger, Config: config, Client: client, @@ -127,3 +129,12 @@ func (c *BaseController) LogReconcileCompleteObjectNotExist(_ ctrlruntime.Reques func (c *BaseController) LogReconcileFailedGettingObject(req ctrlruntime.Request, err error) { c.Log.Criticalf("failed fetching '%s/%s', error: %s", req.Namespace, req.Name, err) } + +// GetServiceDNSSuffix returns the default "svc.cluster.local" dns suffix, or the user's provided +// override value. +func (c *BaseController) GetServiceDNSSuffix() string { + return clabernetesutil.GetEnvStrOrDefault( + clabernetesconstants.InClusterDNSSuffixEnv, + clabernetesconstants.DefaultInClusterDNSSuffix, + ) +} diff --git a/controllers/deployments.go b/controllers/deployments.go deleted file mode 100644 index 048a5782..00000000 --- a/controllers/deployments.go +++ /dev/null @@ -1,56 +0,0 @@ -package controllers - -import ( - "reflect" - - k8sappsv1 "k8s.io/api/apps/v1" - k8scorev1 "k8s.io/api/core/v1" -) - -// ResolvedDeployments is an object that is used to track current and missing deployments for a -// controller such as Containerlab (topology). -type ResolvedDeployments struct { - // Current deployments by endpoint name - Current map[string]*k8sappsv1.Deployment - // Missing deployments by endpoint name - Missing []string - // Extra deployments that should be pruned - Extra []*k8sappsv1.Deployment -} - -// CurrentDeploymentNames returns a slice of the names of the current deployments. -func (r *ResolvedDeployments) CurrentDeploymentNames() []string { - names := make([]string, len(r.Current)) - - var idx int - - for k := range r.Current { - names[idx] = k - - idx++ - } - - return names -} - -// ContainersEqual returns true if the existing container slice matches the rendered container slice -// it ignores slice order. -func ContainersEqual(existing, rendered []k8scorev1.Container) bool { - for existingIdx := range existing { - var matched bool - - for renderedIdx := range rendered { - if reflect.DeepEqual(existing[existingIdx], rendered[renderedIdx]) { - matched = true - - break - } - } - - if !matched { - return false - } - } - - return true -} diff --git a/controllers/services.go b/controllers/services.go deleted file mode 100644 index 8c43f5f4..00000000 --- a/controllers/services.go +++ /dev/null @@ -1,31 +0,0 @@ -package controllers - -import ( - k8scorev1 "k8s.io/api/core/v1" -) - -// ResolvedServices is an object that is used to track current and missing services for a -// controller such as Containerlab (topology). -type ResolvedServices struct { - // Current deployments by endpoint name - Current map[string]*k8scorev1.Service - // Missing deployments by endpoint name - Missing []string - // Extra deployments that should be pruned - Extra []*k8scorev1.Service -} - -// CurrentServiceNames returns a slice of the names of the current services. 
-func (r *ResolvedServices) CurrentServiceNames() []string { - names := make([]string, len(r.Current)) - - var idx int - - for k := range r.Current { - names[idx] = k - - idx++ - } - - return names -} diff --git a/controllers/topology/configmap.go b/controllers/topology/configmap.go deleted file mode 100644 index d8b88ae8..00000000 --- a/controllers/topology/configmap.go +++ /dev/null @@ -1,159 +0,0 @@ -package topology - -import ( - "context" - "fmt" - "reflect" - - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" - apimachinerytypes "k8s.io/apimachinery/pkg/types" - - clabernetesconstants "github.com/srl-labs/clabernetes/constants" - - clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" - clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - "gopkg.in/yaml.v3" - k8scorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -// RenderConfigMap accepts an object (just for name/namespace reasons) and a mapping of clabernetes -// sub-topology configs and tunnels and renders the final configmap for the deployment -- this is -// the configmap that will ultimately be referenced when mounting sub-topologies and tunnel data in -// the clabernetes launcher pod(s) for a given topology. -func (r *Reconciler) RenderConfigMap( - obj ctrlruntimeclient.Object, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel, -) (*k8scorev1.ConfigMap, error) { - configManager := r.ConfigManagerGetter() - globalAnnotations, globalLabels := configManager.GetAllMetadata() - - configMapName := obj.GetName() - - configMap := &k8scorev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: configMapName, - Namespace: obj.GetNamespace(), - Annotations: globalAnnotations, - Labels: map[string]string{ - clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, - clabernetesconstants.LabelName: configMapName, - clabernetesconstants.LabelTopologyOwner: configMapName, - clabernetesconstants.LabelTopologyKind: r.ResourceKind, - }, - }, - Data: map[string]string{}, - } - - for k, v := range globalLabels { - configMap.Labels[k] = v - } - - for nodeName, nodeTopo := range clabernetesConfigs { - // always initialize the tunnels keys in the configmap, this way we don't have to have any - // special handling for no tunnels and things always look consistent; we'll override this - // down below if the node has tunnels of course! 
- configMap.Data[fmt.Sprintf("%s-tunnels", nodeName)] = "" - - yamlNodeTopo, err := yaml.Marshal(nodeTopo) - if err != nil { - return nil, err - } - - configMap.Data[nodeName] = string(yamlNodeTopo) - } - - for nodeName, nodeTunnels := range tunnels { - yamlNodeTunnels, err := yaml.Marshal(nodeTunnels) - if err != nil { - return nil, err - } - - configMap.Data[fmt.Sprintf("%s-tunnels", nodeName)] = string(yamlNodeTunnels) - } - - return configMap, nil -} - -func (r *Reconciler) createConfigMap( - ctx context.Context, - obj ctrlruntimeclient.Object, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel, -) error { - configMap, err := r.RenderConfigMap(obj, clabernetesConfigs, tunnels) - if err != nil { - return err - } - - err = ctrlruntimeutil.SetOwnerReference(obj, configMap, r.Client.Scheme()) - if err != nil { - return err - } - - return r.Client.Create(ctx, configMap) -} - -func (r *Reconciler) enforceConfigMap( - ctx context.Context, - obj ctrlruntimeclient.Object, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel, - actual *k8scorev1.ConfigMap, -) error { - configMap, err := r.RenderConfigMap(obj, clabernetesConfigs, tunnels) - if err != nil { - return err - } - - err = ctrlruntimeutil.SetOwnerReference(obj, configMap, r.Client.Scheme()) - if err != nil { - return err - } - - if configMapConforms(actual, configMap, obj.GetUID()) { - // nothing to do - return nil - } - - return r.Client.Update(ctx, configMap) -} - -func configMapConforms( - existingConfigMap, - renderedConfigMap *k8scorev1.ConfigMap, - expectedOwnerUID apimachinerytypes.UID, -) bool { - if !reflect.DeepEqual(existingConfigMap.Data, renderedConfigMap.Data) { - return false - } - - if !clabernetescontrollers.AnnotationsOrLabelsConform( - existingConfigMap.ObjectMeta.Annotations, - renderedConfigMap.ObjectMeta.Annotations, - ) { - return false - } - - if !clabernetescontrollers.AnnotationsOrLabelsConform( - existingConfigMap.ObjectMeta.Labels, - renderedConfigMap.ObjectMeta.Labels, - ) { - return false - } - - if len(existingConfigMap.ObjectMeta.OwnerReferences) != 1 { - // we should have only one owner reference, the extractor - return false - } - - if existingConfigMap.ObjectMeta.OwnerReferences[0].UID != expectedOwnerUID { - // owner ref uid is not us - return false - } - - return true -} diff --git a/controllers/topology/configmap_test.go b/controllers/topology/configmap_test.go deleted file mode 100644 index c93edaa2..00000000 --- a/controllers/topology/configmap_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package topology_test - -import ( - "encoding/json" - "fmt" - "reflect" - "testing" - - clabernetesconfig "github.com/srl-labs/clabernetes/config" - - k8scorev1 "k8s.io/api/core/v1" - - clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" - clabernetescontrollerstopology "github.com/srl-labs/clabernetes/controllers/topology" - clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" - clabernetesutil "github.com/srl-labs/clabernetes/util" - clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" -) - -var defaultPorts = []string{ - "21022:22/tcp", - "21023:23/tcp", - "21161:161/udp", - "33333:57400/tcp", - "60000:21/tcp", - "60001:80/tcp", - "60002:443/tcp", - "60003:830/tcp", - 
"60004:5000/tcp", - "60005:5900/tcp", - "60006:6030/tcp", - "60007:9339/tcp", - "60008:9340/tcp", - "60009:9559/tcp", -} - -const renderConfigMapTestName = "configmap/render-config-map" - -// TestRenderConfigMap ensures that we properly render the main tunnel/config configmap for a given -// c9s deployment (containerlab CR). -func TestRenderConfigMap(t *testing.T) { - cases := []struct { - name string - obj ctrlruntimeclient.Object - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config - tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel - }{ - { - name: "basic-two-node-with-links", - obj: &clabernetesapistopologyv1alpha1.Containerlab{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-configmap", - Namespace: "nowhere", - }, - }, - clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ - "srl1": { - Name: "clabernetes-srl1", - Prefix: clabernetesutil.ToPointer(""), - Topology: &clabernetesutilcontainerlab.Topology{ - Defaults: &clabernetesutilcontainerlab.NodeDefinition{ - Ports: defaultPorts, - }, - Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ - "srl1": { - Kind: "srl", - }, - }, - Links: []*clabernetesutilcontainerlab.LinkDefinition{ - { - LinkConfig: clabernetesutilcontainerlab.LinkConfig{ - Endpoints: []string{ - "srl1:e1-1", - "host:srl1-e1-1", - }, - }, - }, - }, - }, - Debug: false, - }, - "srl2": { - Name: "clabernetes-srl2", - Prefix: clabernetesutil.ToPointer(""), - Topology: &clabernetesutilcontainerlab.Topology{ - Defaults: &clabernetesutilcontainerlab.NodeDefinition{ - Ports: defaultPorts, - }, - Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ - "srl2": { - Kind: "srl", - }, - }, - Links: []*clabernetesutilcontainerlab.LinkDefinition{ - { - LinkConfig: clabernetesutilcontainerlab.LinkConfig{ - Endpoints: []string{ - "srl2:e1-1", - "host:srl2-e1-1", - }, - }, - }, - }, - }, - Debug: false, - }, - }, - tunnels: map[string][]*clabernetesapistopologyv1alpha1.Tunnel{ - "srl1": { - { - ID: 1, - LocalNodeName: "srl1", - RemoteName: "unitTest-srl2-vx.clabernetes.svc.cluster.local", - RemoteNodeName: "srl2", - LocalLinkName: "e1-1", - RemoteLinkName: "e1-1", - }, - }, - "srl2": { - { - ID: 1, - LocalNodeName: "srl2", - RemoteName: "unitTest-srl1-vx.clabernetes.svc.cluster.local", - RemoteNodeName: "srl1", - LocalLinkName: "e1-1", - RemoteLinkName: "e1-1", - }, - }, - }, - }, - { - name: "basic-two-node-no-links", - obj: &clabernetesapistopologyv1alpha1.Containerlab{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-configmap", - Namespace: "nowhere", - }, - }, - clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ - "srl1": { - Name: "clabernetes-srl1", - Prefix: clabernetesutil.ToPointer(""), - Topology: &clabernetesutilcontainerlab.Topology{ - Defaults: &clabernetesutilcontainerlab.NodeDefinition{ - Ports: defaultPorts, - }, - Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ - "srl1": { - Kind: "srl", - }, - }, - Links: []*clabernetesutilcontainerlab.LinkDefinition{}, - }, - Debug: false, - }, - "srl2": { - Name: "clabernetes-srl2", - Prefix: clabernetesutil.ToPointer(""), - Topology: &clabernetesutilcontainerlab.Topology{ - Defaults: &clabernetesutilcontainerlab.NodeDefinition{ - Ports: defaultPorts, - }, - Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ - "srl2": { - Kind: "srl", - }, - }, - Links: []*clabernetesutilcontainerlab.LinkDefinition{}, - }, - Debug: false, - }, - }, - tunnels: map[string][]*clabernetesapistopologyv1alpha1.Tunnel{ - "srl1": {}, - "srl2": {}, - }, - 
}, - } - - for _, testCase := range cases { - t.Run( - testCase.name, - func(t *testing.T) { - t.Logf("%s: starting", testCase.name) - - reconciler := clabernetescontrollerstopology.Reconciler{ - ResourceKind: "containerlab", - ConfigManagerGetter: clabernetesconfig.GetFakeManager, - } - - got, err := reconciler.RenderConfigMap( - testCase.obj, - testCase.clabernetesConfigs, - testCase.tunnels, - ) - if err != nil { - t.Fatal(err) - } - - if *clabernetestesthelper.Update { - clabernetestesthelper.WriteTestFixtureJSON( - t, - fmt.Sprintf("golden/%s/%s.json", renderConfigMapTestName, testCase.name), - got, - ) - } - - var want k8scorev1.ConfigMap - - err = json.Unmarshal( - clabernetestesthelper.ReadTestFixtureFile( - t, - fmt.Sprintf("golden/%s/%s.json", renderConfigMapTestName, testCase.name), - ), - &want, - ) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(got.Annotations, want.Annotations) { - clabernetestesthelper.FailOutput(t, got.Annotations, want.Annotations) - } - if !reflect.DeepEqual(got.Labels, want.Labels) { - clabernetestesthelper.FailOutput(t, got.Labels, want.Labels) - } - if !reflect.DeepEqual(got.Data, want.Data) { - clabernetestesthelper.FailOutput(t, got.Data, want.Data) - } - }, - ) - } -} diff --git a/controllers/topology/containerlab/config.go b/controllers/topology/containerlab/config.go index d1043053..d7d4561e 100644 --- a/controllers/topology/containerlab/config.go +++ b/controllers/topology/containerlab/config.go @@ -403,7 +403,7 @@ func (c *Controller) processConfigForNode( clab.Name, uninterestingEndpoint.NodeName, clab.Namespace, - clabernetescontrollerstopology.GetServiceDNSSuffix(), + c.BaseController.GetServiceDNSSuffix(), ), LocalLinkName: interestingEndpoint.InterfaceName, RemoteLinkName: uninterestingEndpoint.InterfaceName, diff --git a/controllers/topology/containerlab/controller.go b/controllers/topology/containerlab/controller.go index 1c0e5abc..b96e2d10 100644 --- a/controllers/topology/containerlab/controller.go +++ b/controllers/topology/containerlab/controller.go @@ -14,7 +14,7 @@ import ( clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" - clabernetescontrollerstopology "github.com/srl-labs/clabernetes/controllers/topology" + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" "k8s.io/client-go/rest" ctrlruntime "sigs.k8s.io/controller-runtime" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -41,11 +41,11 @@ func NewController( c := &Controller{ BaseController: baseController, - TopologyReconciler: &clabernetescontrollerstopology.Reconciler{ - Log: baseController.Log, - Client: baseController.Client, - ResourceKind: clabernetesapistopology.Containerlab, - ResourceLister: func( + TopologyReconciler: clabernetescontrollerstopologyreconciler.NewReconciler( + baseController.Log, + baseController.Client, + clabernetesapistopology.Containerlab, + func( ctx context.Context, client ctrlruntimeclient.Client, ) ([]ctrlruntimeclient.Object, error) { @@ -67,8 +67,8 @@ func NewController( return out, nil }, - ConfigManagerGetter: clabernetesconfig.GetManager, - }, + clabernetesconfig.GetManager, + ), } return c @@ -77,7 +77,7 @@ func NewController( // Controller is the Containerlab topology controller object. 
type Controller struct { *clabernetescontrollers.BaseController - TopologyReconciler *clabernetescontrollerstopology.Reconciler + TopologyReconciler *clabernetescontrollerstopologyreconciler.Reconciler } // SetupWithManager sets up the controller with the Manager. diff --git a/controllers/topology/deployment.go b/controllers/topology/deployment.go deleted file mode 100644 index b550ce43..00000000 --- a/controllers/topology/deployment.go +++ /dev/null @@ -1,636 +0,0 @@ -package topology - -import ( - "context" - "fmt" - "reflect" - "strings" - "time" - - clabernetesconfig "github.com/srl-labs/clabernetes/config" - - clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - - clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" - clabernetesconstants "github.com/srl-labs/clabernetes/constants" - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" - claberneteserrors "github.com/srl-labs/clabernetes/errors" - clabernetesutil "github.com/srl-labs/clabernetes/util" - k8sappsv1 "k8s.io/api/apps/v1" - k8scorev1 "k8s.io/api/core/v1" - apimachineryerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apimachinerytypes "k8s.io/apimachinery/pkg/types" - - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -func (r *Reconciler) resolveDeployments( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, -) (*clabernetescontrollers.ResolvedDeployments, error) { - ownedDeployments := &k8sappsv1.DeploymentList{} - - err := r.Client.List( - ctx, - ownedDeployments, - ctrlruntimeclient.InNamespace(obj.GetNamespace()), - ctrlruntimeclient.MatchingLabels{ - clabernetesconstants.LabelTopologyOwner: obj.GetName(), - }, - ) - if err != nil { - r.Log.Criticalf("failed fetching owned deployments, error: '%s'", err) - - return nil, err - } - - deployments := &clabernetescontrollers.ResolvedDeployments{ - Current: map[string]*k8sappsv1.Deployment{}, - } - - for i := range ownedDeployments.Items { - labels := ownedDeployments.Items[i].Labels - - if labels == nil { - return nil, fmt.Errorf( - "%w: labels are nil, but we expect to see topology owner label here", - claberneteserrors.ErrInvalidData, - ) - } - - nodeName, ok := labels[clabernetesconstants.LabelTopologyNode] - if !ok || nodeName == "" { - return nil, fmt.Errorf( - "%w: topology node label is missing or empty", - claberneteserrors.ErrInvalidData, - ) - } - - deployments.Current[nodeName] = &ownedDeployments.Items[i] - } - - allNodes := make([]string, len(clabernetesConfigs)) - - var nodeIdx int - - for nodeName := range clabernetesConfigs { - allNodes[nodeIdx] = nodeName - - nodeIdx++ - } - - deployments.Missing = clabernetesutil.StringSliceDifference( - deployments.CurrentDeploymentNames(), - allNodes, - ) - - r.Log.Debugf( - "deployments are missing for the following nodes: %s", - deployments.Missing, - ) - - extraEndpointDeployments := clabernetesutil.StringSliceDifference( - allNodes, - deployments.CurrentDeploymentNames(), - ) - - r.Log.Debugf( - "extraneous deployments exist for following nodes: %s", - extraEndpointDeployments, - ) - - deployments.Extra = make([]*k8sappsv1.Deployment, len(extraEndpointDeployments)) - - for idx, endpoint := range extraEndpointDeployments { - deployments.Extra[idx] = deployments.Current[endpoint] - } - - return 
deployments, nil -} - -func (r *Reconciler) pruneDeployments( - ctx context.Context, - deployments *clabernetescontrollers.ResolvedDeployments, -) error { - r.Log.Info("pruning extraneous deployments") - - for _, extraDeployment := range deployments.Extra { - r.Log.Debugf( - "removing deployment '%s/%s'", - extraDeployment.Namespace, - extraDeployment.Name, - ) - - err := r.Client.Delete(ctx, extraDeployment) - if err != nil { - r.Log.Criticalf( - "failed removing deployment '%s/%s' error: %s", - extraDeployment.Namespace, - extraDeployment.Name, - err, - ) - - return err - } - } - - return nil -} - -func (r *Reconciler) enforceDeployments( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - deployments *clabernetescontrollers.ResolvedDeployments, -) error { - // handle missing deployments - r.Log.Info("creating missing deployments") - - for _, nodeName := range deployments.Missing { - deployment := renderDeployment( - obj, - clabernetesConfigs, - nodeName, - ) - - err := ctrlruntimeutil.SetOwnerReference(obj, deployment, r.Client.Scheme()) - if err != nil { - return err - } - - r.Log.Debugf( - "creating deployment '%s/%s'", - deployment.Namespace, - deployment.Name, - ) - - err = r.Client.Create(ctx, deployment) - if err != nil { - r.Log.Criticalf( - "failed creating deployment '%s/%s' error: %s", - deployment.Namespace, - deployment.Name, - err, - ) - - return err - } - } - - // compare and update existing deployments if we need to - r.Log.Info("enforcing desired state on existing deployments") - - for nodeName, deployment := range deployments.Current { - r.Log.Debugf( - "comparing existing deployment '%s/%s' to desired state", - deployment.Namespace, - deployment.Name, - ) - - expectedDeployment := renderDeployment( - obj, - clabernetesConfigs, - nodeName, - ) - - err := ctrlruntimeutil.SetOwnerReference(obj, expectedDeployment, r.Client.Scheme()) - if err != nil { - return err - } - - if !deploymentConforms(deployment, expectedDeployment, obj.GetUID()) { - r.Log.Debugf( - "comparing existing deployment '%s/%s' spec does not conform to desired state, "+ - "updating", - deployment.Namespace, - deployment.Name, - ) - - err = r.Client.Update(ctx, expectedDeployment) - if err != nil { - r.Log.Criticalf( - "failed updating deployment '%s/%s' error: %s", - expectedDeployment.Namespace, - expectedDeployment.Name, - err, - ) - - return err - } - } - } - - return nil -} - -func (r *Reconciler) restartDeploymentForNode( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - nodeName string, -) error { - deploymentName := fmt.Sprintf("%s-%s", obj.GetName(), nodeName) - - nodeDeployment := &k8sappsv1.Deployment{} - - err := r.Client.Get( - ctx, - apimachinerytypes.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: deploymentName, - }, - nodeDeployment, - ) - if err != nil { - if apimachineryerrors.IsNotFound(err) { - r.Log.Warnf( - "could not find deployment '%s', cannot restart after config change,"+ - " this should not happen", - deploymentName, - ) - - return nil - } - - return err - } - - if nodeDeployment.Spec.Template.ObjectMeta.Annotations == nil { - nodeDeployment.Spec.Template.ObjectMeta.Annotations = map[string]string{} - } - - now := time.Now().Format(time.RFC3339) - - nodeDeployment.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = now - - return r.Client.Update(ctx, nodeDeployment) -} - -func renderDeployment( - obj 
clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - nodeName string, -) *k8sappsv1.Deployment { - globalAnnotations, globalLabels := clabernetesconfig.GetManager().GetAllMetadata() - - name := obj.GetName() - - deploymentName := fmt.Sprintf("%s-%s", name, nodeName) - configVolumeName := fmt.Sprintf("%s-config", name) - - // match labels are immutable and dont matter if they have the users provided "global" labels, - // so make those first then copy those into "normal" labels and add the other stuff - matchLabels := map[string]string{ - clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, - clabernetesconstants.LabelName: deploymentName, - clabernetesconstants.LabelTopologyOwner: name, - clabernetesconstants.LabelTopologyNode: nodeName, - } - - labels := make(map[string]string) - - for k, v := range matchLabels { - labels[k] = v - } - - for k, v := range globalLabels { - labels[k] = v - } - - commonSpec := obj.GetTopologyCommonSpec() - - launcherLogLevel := clabernetesutil.GetEnvStrOrDefault( - clabernetesconstants.LauncherLoggerLevelEnv, - clabernetesconstants.Info, - ) - - if commonSpec.LauncherLogLevel != "" { - launcherLogLevel = commonSpec.LauncherLogLevel - } - - deployment := &k8sappsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - Namespace: obj.GetNamespace(), - Annotations: globalAnnotations, - Labels: labels, - }, - Spec: k8sappsv1.DeploymentSpec{ - Replicas: clabernetesutil.ToPointer(int32(1)), - RevisionHistoryLimit: clabernetesutil.ToPointer(int32(0)), - Selector: &metav1.LabelSelector{ - MatchLabels: matchLabels, - }, - Template: k8scorev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: globalAnnotations, - Labels: labels, - }, - Spec: k8scorev1.PodSpec{ - Containers: []k8scorev1.Container{ - { - Name: nodeName, - WorkingDir: "/clabernetes", - Image: clabernetesutil.GetEnvStrOrDefault( - clabernetesconstants.LauncherImageEnv, - clabernetesconstants.LauncherDefaultImage, - ), - Command: []string{"/clabernetes/manager", "launch"}, - Ports: []k8scorev1.ContainerPort{ - { - Name: "vxlan", - ContainerPort: clabernetesconstants.VXLANServicePort, - Protocol: "UDP", - }, - }, - VolumeMounts: []k8scorev1.VolumeMount{ - { - Name: configVolumeName, - ReadOnly: true, - MountPath: "/clabernetes/topo.clab.yaml", - SubPath: nodeName, - }, - { - Name: configVolumeName, - ReadOnly: true, - MountPath: "/clabernetes/tunnels.yaml", - SubPath: fmt.Sprintf("%s-tunnels", nodeName), - }, - }, - TerminationMessagePath: "/dev/termination-log", - TerminationMessagePolicy: "File", - ImagePullPolicy: k8scorev1.PullPolicy( - clabernetesutil.GetEnvStrOrDefault( - clabernetesconstants.LauncherPullPolicyEnv, - "IfNotPresent", - ), - ), - SecurityContext: &k8scorev1.SecurityContext{ - // obviously we need privileged for dind setup - Privileged: clabernetesutil.ToPointer(true), - RunAsUser: clabernetesutil.ToPointer(int64(0)), - }, - Env: []k8scorev1.EnvVar{ - { - Name: clabernetesconstants.LauncherLoggerLevelEnv, - Value: launcherLogLevel, - }, - }, - }, - }, - RestartPolicy: "Always", - ServiceAccountName: "default", - Volumes: []k8scorev1.Volume{ - { - Name: configVolumeName, - VolumeSource: k8scorev1.VolumeSource{ - ConfigMap: &k8scorev1.ConfigMapVolumeSource{ - LocalObjectReference: k8scorev1.LocalObjectReference{ - Name: name, - }, - }, - }, - }, - }, - }, - }, - }, - } - - if commonSpec.ContainerlabDebug { - deployment.Spec.Template.Spec.Containers[0].Env = append( - 
deployment.Spec.Template.Spec.Containers[0].Env, - k8scorev1.EnvVar{ - Name: clabernetesconstants.LauncherContainerlabDebug, - Value: clabernetesconstants.True, - }, - ) - } - - deployment = renderDeploymentAddFilesFromConfigMaps(nodeName, obj, deployment) - - deployment = renderDeploymentAddInsecureRegistries(obj, deployment) - - deployment = renderDeploymentAddResources(obj, clabernetesConfigs, nodeName, deployment) - - return deployment -} - -func volumeAlreadyMounted(volumeName string, existingVolumes []k8scorev1.Volume) bool { - for idx := range existingVolumes { - if volumeName == existingVolumes[idx].Name { - return true - } - } - - return false -} - -func renderDeploymentAddFilesFromConfigMaps( - nodeName string, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - deployment *k8sappsv1.Deployment, -) *k8sappsv1.Deployment { - podVolumes := make([]clabernetesapistopologyv1alpha1.FileFromConfigMap, 0) - - for _, fileFromConfigMap := range obj.GetTopologyCommonSpec().FilesFromConfigMap { - if fileFromConfigMap.NodeName != nodeName { - continue - } - - podVolumes = append(podVolumes, fileFromConfigMap) - } - - for _, podVolume := range podVolumes { - if !volumeAlreadyMounted(podVolume.ConfigMapName, deployment.Spec.Template.Spec.Volumes) { - deployment.Spec.Template.Spec.Volumes = append( - deployment.Spec.Template.Spec.Volumes, - k8scorev1.Volume{ - Name: podVolume.ConfigMapName, - VolumeSource: k8scorev1.VolumeSource{ - ConfigMap: &k8scorev1.ConfigMapVolumeSource{ - LocalObjectReference: k8scorev1.LocalObjectReference{ - Name: podVolume.ConfigMapName, - }, - }, - }, - }, - ) - } - - volumeMount := k8scorev1.VolumeMount{ - Name: podVolume.ConfigMapName, - ReadOnly: false, - MountPath: fmt.Sprintf("/clabernetes/%s", podVolume.FilePath), - SubPath: podVolume.ConfigMapPath, - } - - deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append( - deployment.Spec.Template.Spec.Containers[0].VolumeMounts, - volumeMount, - ) - } - - return deployment -} - -func renderDeploymentAddInsecureRegistries( - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - deployment *k8sappsv1.Deployment, -) *k8sappsv1.Deployment { - insecureRegistries := obj.GetTopologyCommonSpec().InsecureRegistries - - if len(insecureRegistries) > 0 { - deployment.Spec.Template.Spec.Containers[0].Env = append( - deployment.Spec.Template.Spec.Containers[0].Env, - k8scorev1.EnvVar{ - Name: clabernetesconstants.LauncherInsecureRegistries, - Value: strings.Join(insecureRegistries, ","), - }, - ) - } - - return deployment -} - -func renderDeploymentAddResources( - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - nodeName string, - deployment *k8sappsv1.Deployment, -) *k8sappsv1.Deployment { - commonSpec := obj.GetTopologyCommonSpec() - - nodeResources, nodeResourcesOk := commonSpec.Resources[nodeName] - if nodeResourcesOk { - deployment.Spec.Template.Spec.Containers[0].Resources = nodeResources - - return deployment - } - - defaultResources, defaultResourcesOk := commonSpec.Resources[clabernetesconstants.Default] - if defaultResourcesOk { - deployment.Spec.Template.Spec.Containers[0].Resources = defaultResources - - return deployment - } - - resources := clabernetesconfig.GetManager().GetResourcesForContainerlabKind( - clabernetesConfigs[nodeName].Topology.GetNodeKindType(nodeName), - ) - - if resources != nil { - deployment.Spec.Template.Spec.Containers[0].Resources = *resources - } - - return deployment -} - -func 
deploymentConforms( - existingDeployment, - renderedDeployment *k8sappsv1.Deployment, - expectedOwnerUID apimachinerytypes.UID, -) bool { - if !reflect.DeepEqual(existingDeployment.Spec.Replicas, renderedDeployment.Spec.Replicas) { - return false - } - - if !reflect.DeepEqual(existingDeployment.Spec.Selector, renderedDeployment.Spec.Selector) { - return false - } - - if !clabernetescontrollers.ContainersEqual( - existingDeployment.Spec.Template.Spec.Containers, - renderedDeployment.Spec.Template.Spec.Containers, - ) { - return false - } - - if !reflect.DeepEqual( - existingDeployment.Spec.Template.Spec.ServiceAccountName, - renderedDeployment.Spec.Template.Spec.ServiceAccountName, - ) { - return false - } - - if !reflect.DeepEqual( - existingDeployment.Spec.Template.Spec.RestartPolicy, - renderedDeployment.Spec.Template.Spec.RestartPolicy, - ) { - return false - } - - // this and labels will probably be a future us problem -- maybe some mutating webhooks will be - // adding labels or annotations that will cause us to continually reconcile, that would be lame - // ... we'll cross that bridge when we get there :) - if !reflect.DeepEqual( - existingDeployment.Spec.Template.ObjectMeta.Annotations, - renderedDeployment.Spec.Template.ObjectMeta.Annotations, - ) { - return false - } - - if !reflect.DeepEqual( - existingDeployment.Spec.Template.ObjectMeta.Labels, - renderedDeployment.Spec.Template.ObjectMeta.Labels, - ) { - return false - } - - if existingDeployment.ObjectMeta.Annotations == nil && - renderedDeployment.ObjectMeta.Annotations != nil { - // obviously our annotations don't exist, so we need to enforce that - return false - } - - if !clabernetescontrollers.AnnotationsOrLabelsConform( - existingDeployment.ObjectMeta.Annotations, - renderedDeployment.ObjectMeta.Annotations, - ) { - return false - } - - if !clabernetescontrollers.AnnotationsOrLabelsConform( - existingDeployment.ObjectMeta.Labels, - renderedDeployment.ObjectMeta.Labels, - ) { - return false - } - - if len(existingDeployment.ObjectMeta.OwnerReferences) != 1 { - // we should have only one owner reference, the extractor - return false - } - - if existingDeployment.ObjectMeta.OwnerReferences[0].UID != expectedOwnerUID { - // owner ref uid is not us - return false - } - - return true -} - -func determineNodesNeedingRestart( - preReconcileConfigs, - configs map[string]*clabernetesutilcontainerlab.Config, -) []string { - var nodesNeedingRestart []string - - for nodeName, nodeConfig := range configs { - _, nodeExistedBefore := preReconcileConfigs[nodeName] - if !nodeExistedBefore { - continue - } - - if !reflect.DeepEqual(nodeConfig, preReconcileConfigs[nodeName]) { - nodesNeedingRestart = append( - nodesNeedingRestart, - nodeName, - ) - } - } - - return nodesNeedingRestart -} diff --git a/controllers/topology/kne/config.go b/controllers/topology/kne/config.go index 873e9d52..9c0f5bf7 100644 --- a/controllers/topology/kne/config.go +++ b/controllers/topology/kne/config.go @@ -160,7 +160,7 @@ func (c *Controller) processConfig( //nolint:funlen kne.Name, uninterestingEndpoint.NodeName, kne.Namespace, - clabernetescontrollerstopology.GetServiceDNSSuffix(), + c.BaseController.GetServiceDNSSuffix(), ), LocalLinkName: interestingEndpoint.InterfaceName, RemoteLinkName: uninterestingEndpoint.InterfaceName, diff --git a/controllers/topology/kne/controller.go b/controllers/topology/kne/controller.go index 66722506..5bcfd009 100644 --- a/controllers/topology/kne/controller.go +++ b/controllers/topology/kne/controller.go @@ -4,6 +4,8 @@ 
import ( "context" "fmt" + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" + clabernetesconfig "github.com/srl-labs/clabernetes/config" ctrlruntimebuilder "sigs.k8s.io/controller-runtime/pkg/builder" @@ -11,8 +13,6 @@ import ( k8scorev1 "k8s.io/api/core/v1" ctrlruntimehandler "sigs.k8s.io/controller-runtime/pkg/handler" - clabernetescontrollerstopology "github.com/srl-labs/clabernetes/controllers/topology" - clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" @@ -43,11 +43,11 @@ func NewController( c := &Controller{ BaseController: baseController, - TopologyReconciler: &clabernetescontrollerstopology.Reconciler{ - Log: baseController.Log, - Client: baseController.Client, - ResourceKind: clabernetesapistopology.Kne, - ResourceLister: func( + TopologyReconciler: clabernetescontrollerstopologyreconciler.NewReconciler( + baseController.Log, + baseController.Client, + clabernetesapistopology.Kne, + func( ctx context.Context, client ctrlruntimeclient.Client, ) ([]ctrlruntimeclient.Object, error) { @@ -69,8 +69,8 @@ func NewController( return out, nil }, - ConfigManagerGetter: clabernetesconfig.GetManager, - }, + clabernetesconfig.GetManager, + ), } return c @@ -79,7 +79,7 @@ func NewController( // Controller is the Containerlab topology controller object. type Controller struct { *clabernetescontrollers.BaseController - TopologyReconciler *clabernetescontrollerstopology.Reconciler + TopologyReconciler *clabernetescontrollerstopologyreconciler.Reconciler } // SetupWithManager sets up the controller with the Manager. diff --git a/controllers/topology/reconciler.go b/controllers/topology/reconciler.go deleted file mode 100644 index 1abcddee..00000000 --- a/controllers/topology/reconciler.go +++ /dev/null @@ -1,211 +0,0 @@ -package topology - -import ( - "context" - "slices" - - clabernetesconfig "github.com/srl-labs/clabernetes/config" - - clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" - claberneteslogging "github.com/srl-labs/clabernetes/logging" - clabernetesutil "github.com/srl-labs/clabernetes/util" - clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - "gopkg.in/yaml.v3" - k8scorev1 "k8s.io/api/core/v1" - apimachineryerrors "k8s.io/apimachinery/pkg/api/errors" - apimachinerytypes "k8s.io/apimachinery/pkg/types" - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - ctrlruntimereconcile "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -// Reconciler (TopologyReconciler) is the base clabernetes topology reconciler that is embedded in -// all clabernetes topology controllers, it provides common methods for reconciling the -// common/standard resources that represent a clabernetes object (configmap, deployments, -// services, etc.). -type Reconciler struct { - Log claberneteslogging.Instance - Client ctrlruntimeclient.Client - ResourceKind string - ResourceLister func( - ctx context.Context, - client ctrlruntimeclient.Client, - ) ([]ctrlruntimeclient.Object, error) - ConfigManagerGetter func() clabernetesconfig.Manager -} - -// ReconcileConfigMap reconciles the primary configmap containing clabernetes configs and tunnel -// information. 
-func (r *Reconciler) ReconcileConfigMap( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel, -) error { - configMap := &k8scorev1.ConfigMap{} - - err := r.Client.Get( - ctx, - apimachinerytypes.NamespacedName{ - Name: obj.GetName(), - Namespace: obj.GetNamespace(), - }, - configMap, - ) - if err != nil { - if apimachineryerrors.IsNotFound(err) { - return r.createConfigMap(ctx, obj, clabernetesConfigs, tunnels) - } - - return err - } - - return r.enforceConfigMap(ctx, obj, clabernetesConfigs, tunnels, configMap) -} - -// ReconcileDeployments reconciles the deployments that make up a clabernetes Topology. -func (r *Reconciler) ReconcileDeployments( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - preReconcileConfigs, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, -) error { - deployments, err := r.resolveDeployments(ctx, obj, clabernetesConfigs) - if err != nil { - return err - } - - err = r.pruneDeployments(ctx, deployments) - if err != nil { - return err - } - - err = r.enforceDeployments(ctx, obj, clabernetesConfigs, deployments) - if err != nil { - return err - } - - nodesNeedingRestart := determineNodesNeedingRestart(preReconcileConfigs, clabernetesConfigs) - if len(nodesNeedingRestart) == 0 { - return nil - } - - for _, nodeName := range nodesNeedingRestart { - if slices.Contains(deployments.Missing, nodeName) { - // is a new node, don't restart, we'll deploy it soon - continue - } - - r.Log.Infof( - "restarting the nodes '%s' as configurations have changed", - nodesNeedingRestart, - ) - - err = r.restartDeploymentForNode(ctx, obj, nodeName) - if err != nil { - return err - } - } - - return nil -} - -// ReconcileServiceFabric reconciles the service used for "fabric" (inter node) connectivity. -func (r *Reconciler) ReconcileServiceFabric( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, -) error { - services, err := r.resolveFabricServices(ctx, obj, clabernetesConfigs) - if err != nil { - return err - } - - err = r.pruneFabricServices(ctx, services) - if err != nil { - return err - } - - err = r.enforceFabricServices(ctx, obj, services) - if err != nil { - return err - } - - return nil -} - -// ReconcileServicesExpose reconciles the service(s) used for exposing nodes. 
-func (r *Reconciler) ReconcileServicesExpose( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, -) (bool, error) { - var shouldUpdate bool - - objTopologyStatus := obj.GetTopologyStatus() - - if objTopologyStatus.NodeExposedPorts == nil { - objTopologyStatus.NodeExposedPorts = map[string]*clabernetesapistopologyv1alpha1.ExposedPorts{} //nolint:lll - - shouldUpdate = true - } - - services, err := r.resolveExposeServices(ctx, obj, clabernetesConfigs) - if err != nil { - return shouldUpdate, err - } - - err = r.pruneExposeServices(ctx, services) - if err != nil { - return shouldUpdate, err - } - - err = r.enforceExposeServices(ctx, obj, &objTopologyStatus, clabernetesConfigs, services) - if err != nil { - return shouldUpdate, err - } - - nodeExposedPortsBytes, err := yaml.Marshal(objTopologyStatus.NodeExposedPorts) - if err != nil { - return shouldUpdate, err - } - - newNodeExposedPortsHash := clabernetesutil.HashBytes(nodeExposedPortsBytes) - - if objTopologyStatus.NodeExposedPortsHash != newNodeExposedPortsHash { - objTopologyStatus.NodeExposedPortsHash = newNodeExposedPortsHash - - obj.SetTopologyStatus(objTopologyStatus) - - shouldUpdate = true - } - - return shouldUpdate, nil -} - -// EnqueueForAll enqueues a reconcile for kinds the Reconciler represents. This is probably not very -// efficient/good but we should have low volume and we're using the cached ctrlruntime client so its -// probably ok :). -func (r *Reconciler) EnqueueForAll( - ctx context.Context, - _ ctrlruntimeclient.Object, -) []ctrlruntimereconcile.Request { - objList, err := r.ResourceLister(ctx, r.Client) - if err != nil { - r.Log.Criticalf("failed listing resource objects in EnqueueForAll, err: %s", err) - - return nil - } - - requests := make([]ctrlruntimereconcile.Request, len(objList)) - - for idx := range objList { - requests[idx] = ctrlruntimereconcile.Request{ - NamespacedName: apimachinerytypes.NamespacedName{ - Namespace: objList[idx].GetNamespace(), - Name: objList[idx].GetName(), - }, - } - } - - return requests -} diff --git a/controllers/topology/reconciler/configmap.go b/controllers/topology/reconciler/configmap.go new file mode 100644 index 00000000..07a90cba --- /dev/null +++ b/controllers/topology/reconciler/configmap.go @@ -0,0 +1,136 @@ +package reconciler + +import ( + "fmt" + "reflect" + + claberneteslogging "github.com/srl-labs/clabernetes/logging" + + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + "gopkg.in/yaml.v3" + k8scorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apimachinerytypes "k8s.io/apimachinery/pkg/types" +) + +// NewConfigMapReconciler returns an instance of ConfigMapReconciler. +func NewConfigMapReconciler( + log claberneteslogging.Instance, + owningTopologyKind string, + configManagerGetter clabernetesconfig.ManagerGetterFunc, +) *ConfigMapReconciler { + return &ConfigMapReconciler{ + log: log, + owningTopologyKind: owningTopologyKind, + configManagerGetter: configManagerGetter, + } +} + +// ConfigMapReconciler is a subcomponent of the "TopologyReconciler" but is exposed for testing +// purposes. 
This is the component responsible for rendering/validating configmaps for a +// clabernetes topology resource. +type ConfigMapReconciler struct { + log claberneteslogging.Instance + owningTopologyKind string + configManagerGetter clabernetesconfig.ManagerGetterFunc +} + +// Render accepts an object (just for name/namespace reasons) and a mapping of clabernetes +// sub-topology configs and tunnels and renders the final configmap for the deployment -- this is +// the configmap that will ultimately be referenced when mounting sub-topologies and tunnel data in +// the clabernetes launcher pod(s) for a given topology. +func (r *ConfigMapReconciler) Render( + owningTopologyNamespacedName apimachinerytypes.NamespacedName, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel, +) (*k8scorev1.ConfigMap, error) { + annotations, globalLabels := r.configManagerGetter().GetAllMetadata() + + labels := map[string]string{ + clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, + clabernetesconstants.LabelName: owningTopologyNamespacedName.Name, + clabernetesconstants.LabelTopologyOwner: owningTopologyNamespacedName.Name, + clabernetesconstants.LabelTopologyKind: r.owningTopologyKind, + } + + for k, v := range globalLabels { + labels[k] = v + } + + data := make(map[string]string) + + for nodeName, nodeTopo := range clabernetesConfigs { + // always initialize the tunnels keys in the configmap, this way we don't have to have any + // special handling for no tunnels and things always look consistent; we'll override this + // down below if the node has tunnels of course! + data[fmt.Sprintf("%s-tunnels", nodeName)] = "" + + yamlNodeTopo, err := yaml.Marshal(nodeTopo) + if err != nil { + return nil, err + } + + data[nodeName] = string(yamlNodeTopo) + } + + for nodeName, nodeTunnels := range tunnels { + yamlNodeTunnels, err := yaml.Marshal(nodeTunnels) + if err != nil { + return nil, err + } + + data[fmt.Sprintf("%s-tunnels", nodeName)] = string(yamlNodeTunnels) + } + + return &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: owningTopologyNamespacedName.Name, + Namespace: owningTopologyNamespacedName.Namespace, + Annotations: annotations, + Labels: labels, + }, + Data: data, + }, nil +} + +// Conforms checks if the existingConfigMap conforms with the renderedConfigMap. 
+func (r *ConfigMapReconciler) Conforms( + existingConfigMap, + renderedConfigMap *k8scorev1.ConfigMap, + expectedOwnerUID apimachinerytypes.UID, +) bool { + if !reflect.DeepEqual(existingConfigMap.Data, renderedConfigMap.Data) { + return false + } + + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( + existingConfigMap.ObjectMeta.Annotations, + renderedConfigMap.ObjectMeta.Annotations, + ) { + return false + } + + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( + existingConfigMap.ObjectMeta.Labels, + renderedConfigMap.ObjectMeta.Labels, + ) { + return false + } + + if len(existingConfigMap.ObjectMeta.OwnerReferences) != 1 { + // we should have only one owner reference, the extractor + return false + } + + if existingConfigMap.ObjectMeta.OwnerReferences[0].UID != expectedOwnerUID { + // owner ref uid is not us + return false + } + + return true +} diff --git a/controllers/topology/reconciler/configmap_test.go b/controllers/topology/reconciler/configmap_test.go new file mode 100644 index 00000000..bb26f558 --- /dev/null +++ b/controllers/topology/reconciler/configmap_test.go @@ -0,0 +1,502 @@ +package reconciler_test + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + claberneteslogging "github.com/srl-labs/clabernetes/logging" + + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" + + clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" + apimachinerytypes "k8s.io/apimachinery/pkg/types" + + clabernetesconfig "github.com/srl-labs/clabernetes/config" + + k8scorev1 "k8s.io/api/core/v1" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + clabernetesutil "github.com/srl-labs/clabernetes/util" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" +) + +var defaultPorts = []string{ + "21022:22/tcp", + "21023:23/tcp", + "21161:161/udp", + "33333:57400/tcp", + "60000:21/tcp", + "60001:80/tcp", + "60002:443/tcp", + "60003:830/tcp", + "60004:5000/tcp", + "60005:5900/tcp", + "60006:6030/tcp", + "60007:9339/tcp", + "60008:9340/tcp", + "60009:9559/tcp", +} + +const renderConfigMapTestName = "configmap/render-config-map" + +// TestRenderConfigMap ensures that we properly render the main tunnel/config configmap for a given +// c9s deployment (containerlab CR). 
+func TestRenderConfigMap(t *testing.T) { + cases := []struct { + name string + namespacedName apimachinerytypes.NamespacedName + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config + tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel + }{ + { + name: "basic-two-node-with-links", + namespacedName: apimachinerytypes.NamespacedName{ + Name: "test-configmap", + Namespace: "nowhere", + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "clabernetes-srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: defaultPorts, + }, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + }, + }, + Links: []*clabernetesutilcontainerlab.LinkDefinition{ + { + LinkConfig: clabernetesutilcontainerlab.LinkConfig{ + Endpoints: []string{ + "srl1:e1-1", + "host:srl1-e1-1", + }, + }, + }, + }, + }, + Debug: false, + }, + "srl2": { + Name: "clabernetes-srl2", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: defaultPorts, + }, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl2": { + Kind: "srl", + }, + }, + Links: []*clabernetesutilcontainerlab.LinkDefinition{ + { + LinkConfig: clabernetesutilcontainerlab.LinkConfig{ + Endpoints: []string{ + "srl2:e1-1", + "host:srl2-e1-1", + }, + }, + }, + }, + }, + Debug: false, + }, + }, + tunnels: map[string][]*clabernetesapistopologyv1alpha1.Tunnel{ + "srl1": { + { + ID: 1, + LocalNodeName: "srl1", + RemoteName: "unitTest-srl2-vx.clabernetes.svc.cluster.local", + RemoteNodeName: "srl2", + LocalLinkName: "e1-1", + RemoteLinkName: "e1-1", + }, + }, + "srl2": { + { + ID: 1, + LocalNodeName: "srl2", + RemoteName: "unitTest-srl1-vx.clabernetes.svc.cluster.local", + RemoteNodeName: "srl1", + LocalLinkName: "e1-1", + RemoteLinkName: "e1-1", + }, + }, + }, + }, + { + name: "basic-two-node-no-links", + namespacedName: apimachinerytypes.NamespacedName{ + Name: "test-configmap", + Namespace: "nowhere", + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "clabernetes-srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: defaultPorts, + }, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + }, + }, + Links: []*clabernetesutilcontainerlab.LinkDefinition{}, + }, + Debug: false, + }, + "srl2": { + Name: "clabernetes-srl2", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: defaultPorts, + }, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl2": { + Kind: "srl", + }, + }, + Links: []*clabernetesutilcontainerlab.LinkDefinition{}, + }, + Debug: false, + }, + }, + tunnels: map[string][]*clabernetesapistopologyv1alpha1.Tunnel{ + "srl1": {}, + "srl2": {}, + }, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewConfigMapReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got, err := reconciler.Render( + 
testCase.namespacedName, + testCase.clabernetesConfigs, + testCase.tunnels, + ) + if err != nil { + t.Fatal(err) + } + + if *clabernetestesthelper.Update { + clabernetestesthelper.WriteTestFixtureJSON( + t, + fmt.Sprintf("golden/%s/%s.json", renderConfigMapTestName, testCase.name), + got, + ) + } + + var want k8scorev1.ConfigMap + + err = json.Unmarshal( + clabernetestesthelper.ReadTestFixtureFile( + t, + fmt.Sprintf("golden/%s/%s.json", renderConfigMapTestName, testCase.name), + ), + &want, + ) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(got.Annotations, want.Annotations) { + clabernetestesthelper.FailOutput(t, got.Annotations, want.Annotations) + } + if !reflect.DeepEqual(got.Labels, want.Labels) { + clabernetestesthelper.FailOutput(t, got.Labels, want.Labels) + } + if !reflect.DeepEqual(got.Data, want.Data) { + clabernetestesthelper.FailOutput(t, got.Data, want.Data) + } + }, + ) + } +} + +func TestConfigMapConforms(t *testing.T) { + cases := []struct { + name string + existing *k8scorev1.ConfigMap + rendered *k8scorev1.ConfigMap + ownerUID apimachinerytypes.UID + conforms bool + }{ + { + name: "simple", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + { + name: "bad-data-extra-stuff", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Data: map[string]string{ + "something": "not in the expected", + }, + }, + rendered: &k8scorev1.ConfigMap{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-data-missing-stuff", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + Data: map[string]string{ + "something": "we expect expected", + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + + // annotations + + { + name: "missing-clabernetes-global-annotations", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelse": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-annotations-wrong-value", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-annotations-ok", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + 
Annotations: map[string]string{ + "somethingelseentirely": "thisisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + + // labels + + { + name: "missing-clabernetes-global-annotations", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelse": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-labels-wrong-value", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-labels-ok", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelseentirely": "thisisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + + // owner + + { + name: "bad-owner", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "multiple-owner", + existing: &k8scorev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.ConfigMap{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewConfigMapReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + actual := reconciler.Conforms( + testCase.existing, + testCase.rendered, + testCase.ownerUID, + ) + if actual != testCase.conforms { + clabernetestesthelper.FailOutput(t, testCase.existing, testCase.rendered) + } + }) + } +} diff --git a/controllers/topology/reconciler/crud.go b/controllers/topology/reconciler/crud.go new file mode 100644 index 00000000..869782f2 --- /dev/null +++ b/controllers/topology/reconciler/crud.go @@ -0,0 +1,115 @@ +package reconciler + +import ( + "context" + + apimachinerytypes "k8s.io/apimachinery/pkg/types" + ctrlruntimeclient 
"sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func (r *Reconciler) createObj( + ctx context.Context, + ownerObj, + createObj ctrlruntimeclient.Object, + createObjKind string, +) error { + err := ctrlruntimeutil.SetOwnerReference(ownerObj, createObj, r.Client.Scheme()) + if err != nil { + return err + } + + r.Log.Debugf( + "creating %s '%s/%s'", + createObjKind, + createObj.GetNamespace(), + createObj.GetName(), + ) + + err = r.Client.Create(ctx, createObj) + if err != nil { + r.Log.Criticalf( + "failed creating %s '%s/%s' error: %s", + createObjKind, + createObj.GetNamespace(), + createObj.GetName(), + err, + ) + + return err + } + + return nil +} + +func (r *Reconciler) getObj( + ctx context.Context, + getObj ctrlruntimeclient.Object, + namespacedName apimachinerytypes.NamespacedName, + getObjKind string, +) error { + r.Log.Debugf( + "getting %s '%s/%s'", + getObjKind, + getObj.GetNamespace(), + getObj.GetName(), + ) + + return r.Client.Get(ctx, namespacedName, getObj) +} + +func (r *Reconciler) updateObj( + ctx context.Context, + updateObj ctrlruntimeclient.Object, + updateObjKind string, +) error { + r.Log.Debugf( + "updating %s '%s/%s'", + updateObjKind, + updateObj.GetNamespace(), + updateObj.GetName(), + ) + + err := r.Client.Update(ctx, updateObj) + if err != nil { + r.Log.Criticalf( + "failed updating %s '%s/%s' error: %s", + updateObjKind, + updateObj.GetNamespace(), + updateObj.GetName(), + err, + ) + + return err + } + + return nil +} + +func (r *Reconciler) deleteObj( + ctx context.Context, + deleteObj ctrlruntimeclient.Object, + deleteObjKind string, +) error { + r.Log.Debugf( + "deleting %s '%s/%s'", + deleteObjKind, + deleteObj.GetNamespace(), + deleteObj.GetName(), + ) + + err := r.Client.Delete(ctx, deleteObj) + if err != nil { + r.Log.Criticalf( + "failed deleting %s '%s/%s' error: %s", + deleteObjKind, + deleteObj.GetNamespace(), + deleteObj.GetName(), + err, + ) + + return err + } + + return nil +} diff --git a/controllers/topology/reconciler/deployment.go b/controllers/topology/reconciler/deployment.go new file mode 100644 index 00000000..b808d638 --- /dev/null +++ b/controllers/topology/reconciler/deployment.go @@ -0,0 +1,521 @@ +package reconciler + +import ( + "fmt" + "reflect" + "strings" + + claberneteslogging "github.com/srl-labs/clabernetes/logging" + + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + + apimachinerytypes "k8s.io/apimachinery/pkg/types" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + claberneteserrors "github.com/srl-labs/clabernetes/errors" + clabernetesutil "github.com/srl-labs/clabernetes/util" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + k8sappsv1 "k8s.io/api/apps/v1" + k8scorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NewDeploymentReconciler returns an instance of DeploymentReconciler. 
+func NewDeploymentReconciler( + log claberneteslogging.Instance, + owningTopologyKind string, + configManagerGetter clabernetesconfig.ManagerGetterFunc, +) *DeploymentReconciler { + return &DeploymentReconciler{ + log: log, + owningTopologyKind: owningTopologyKind, + configManagerGetter: configManagerGetter, + } +} + +// DeploymentReconciler is a subcomponent of the "TopologyReconciler" but is exposed for testing +// purposes. This is the component responsible for rendering/validating deployments for a +// clabernetes topology resource. +type DeploymentReconciler struct { + log claberneteslogging.Instance + owningTopologyKind string + configManagerGetter clabernetesconfig.ManagerGetterFunc +} + +// Resolve accepts a mapping of clabernetes configs and a list of deployments that are -- by owner +// reference and/or labels -- associated with the topology. It returns a ObjectDiffer object +// that contains the missing, extra, and current deployments for the topology. +func (r *DeploymentReconciler) Resolve( + ownedDeployments *k8sappsv1.DeploymentList, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + _ clabernetesapistopologyv1alpha1.TopologyCommonObject, +) (*clabernetesutil.ObjectDiffer[*k8sappsv1.Deployment], error) { + deployments := &clabernetesutil.ObjectDiffer[*k8sappsv1.Deployment]{ + Current: map[string]*k8sappsv1.Deployment{}, + } + + for i := range ownedDeployments.Items { + labels := ownedDeployments.Items[i].Labels + + if labels == nil { + return nil, fmt.Errorf( + "%w: labels are nil, but we expect to see topology owner label here", + claberneteserrors.ErrInvalidData, + ) + } + + nodeName, ok := labels[clabernetesconstants.LabelTopologyNode] + if !ok || nodeName == "" { + return nil, fmt.Errorf( + "%w: topology node label is missing or empty", + claberneteserrors.ErrInvalidData, + ) + } + + deployments.Current[nodeName] = &ownedDeployments.Items[i] + } + + allNodes := make([]string, len(clabernetesConfigs)) + + var nodeIdx int + + for nodeName := range clabernetesConfigs { + allNodes[nodeIdx] = nodeName + + nodeIdx++ + } + + deployments.SetMissing(allNodes) + deployments.SetExtra(allNodes) + + return deployments, nil +} + +func (r *DeploymentReconciler) renderDeploymentBase( + name, + namespace, + owningTopologyName, + nodeName string, +) *k8sappsv1.Deployment { + annotations, globalLabels := r.configManagerGetter().GetAllMetadata() + + deploymentName := fmt.Sprintf("%s-%s", owningTopologyName, nodeName) + + selectorLabels := map[string]string{ + clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, + clabernetesconstants.LabelName: deploymentName, + clabernetesconstants.LabelTopologyOwner: owningTopologyName, + clabernetesconstants.LabelTopologyNode: nodeName, + } + + labels := map[string]string{ + clabernetesconstants.LabelTopologyKind: r.owningTopologyKind, + } + + for k, v := range selectorLabels { + labels[k] = v + } + + for k, v := range globalLabels { + labels[k] = v + } + + return &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + Labels: labels, + }, + Spec: k8sappsv1.DeploymentSpec{ + Replicas: clabernetesutil.ToPointer(int32(1)), + RevisionHistoryLimit: clabernetesutil.ToPointer(int32(0)), + Selector: &metav1.LabelSelector{ + MatchLabels: selectorLabels, + }, + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: annotations, + Labels: labels, + }, + Spec: k8scorev1.PodSpec{ + Containers: []k8scorev1.Container{}, + RestartPolicy: 
"Always", + ServiceAccountName: "default", + Volumes: []k8scorev1.Volume{}, + }, + }, + }, + } +} + +func (r *DeploymentReconciler) renderDeploymentVolumes( + deployment *k8sappsv1.Deployment, + nodeName, + configVolumeName, + owningTopologyName string, + owningTopologyCommonSpec *clabernetesapistopologyv1alpha1.TopologyCommonSpec, +) []k8scorev1.VolumeMount { + volumes := []k8scorev1.Volume{ + { + Name: configVolumeName, + VolumeSource: k8scorev1.VolumeSource{ + ConfigMap: &k8scorev1.ConfigMapVolumeSource{ + LocalObjectReference: k8scorev1.LocalObjectReference{ + Name: owningTopologyName, + }, + }, + }, + }, + } + + volumesFromConfigMaps := make([]clabernetesapistopologyv1alpha1.FileFromConfigMap, 0) + + volumeMountsFromCommonSpec := make([]k8scorev1.VolumeMount, 0) + + for _, fileFromConfigMap := range owningTopologyCommonSpec.FilesFromConfigMap { + if fileFromConfigMap.NodeName != nodeName { + continue + } + + volumesFromConfigMaps = append(volumesFromConfigMaps, fileFromConfigMap) + } + + for _, podVolume := range volumesFromConfigMaps { + if !clabernetesutilkubernetes.VolumeAlreadyMounted( + podVolume.ConfigMapName, + deployment.Spec.Template.Spec.Volumes, + ) { + deployment.Spec.Template.Spec.Volumes = append( + deployment.Spec.Template.Spec.Volumes, + k8scorev1.Volume{ + Name: podVolume.ConfigMapName, + VolumeSource: k8scorev1.VolumeSource{ + ConfigMap: &k8scorev1.ConfigMapVolumeSource{ + LocalObjectReference: k8scorev1.LocalObjectReference{ + Name: podVolume.ConfigMapName, + }, + }, + }, + }, + ) + } + + volumeMount := k8scorev1.VolumeMount{ + Name: podVolume.ConfigMapName, + ReadOnly: false, + MountPath: fmt.Sprintf("/clabernetes/%s", podVolume.FilePath), + SubPath: podVolume.ConfigMapPath, + } + + volumeMountsFromCommonSpec = append( + volumeMountsFromCommonSpec, + volumeMount, + ) + } + + deployment.Spec.Template.Spec.Volumes = volumes + + return volumeMountsFromCommonSpec +} + +func (r *DeploymentReconciler) renderDeploymentContainer( + deployment *k8sappsv1.Deployment, + nodeName, + configVolumeName string, + volumeMountsFromCommonSpec []k8scorev1.VolumeMount, +) { + container := k8scorev1.Container{ + Name: nodeName, + WorkingDir: "/clabernetes", + Image: clabernetesutil.GetEnvStrOrDefault( + clabernetesconstants.LauncherImageEnv, + clabernetesconstants.LauncherDefaultImage, + ), + Command: []string{"/clabernetes/manager", "launch"}, + Ports: []k8scorev1.ContainerPort{ + { + Name: "vxlan", + ContainerPort: clabernetesconstants.VXLANServicePort, + Protocol: "UDP", + }, + }, + VolumeMounts: []k8scorev1.VolumeMount{ + { + Name: configVolumeName, + ReadOnly: true, + MountPath: "/clabernetes/topo.clab.yaml", + SubPath: nodeName, + }, + { + Name: configVolumeName, + ReadOnly: true, + MountPath: "/clabernetes/tunnels.yaml", + SubPath: fmt.Sprintf("%s-tunnels", nodeName), + }, + }, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: "File", + ImagePullPolicy: k8scorev1.PullPolicy( + clabernetesutil.GetEnvStrOrDefault( + clabernetesconstants.LauncherPullPolicyEnv, + "IfNotPresent", + ), + ), + SecurityContext: &k8scorev1.SecurityContext{ + // obviously we need privileged for dind setup + Privileged: clabernetesutil.ToPointer(true), + RunAsUser: clabernetesutil.ToPointer(int64(0)), + }, + } + + container.VolumeMounts = append(container.VolumeMounts, volumeMountsFromCommonSpec...) 
+ + deployment.Spec.Template.Spec.Containers = []k8scorev1.Container{container} +} + +func (r *DeploymentReconciler) renderDeploymentContainerEnv( + deployment *k8sappsv1.Deployment, + owningTopologyCommonSpec *clabernetesapistopologyv1alpha1.TopologyCommonSpec, +) { + launcherLogLevel := clabernetesutil.GetEnvStrOrDefault( + clabernetesconstants.LauncherLoggerLevelEnv, + clabernetesconstants.Info, + ) + + if owningTopologyCommonSpec.LauncherLogLevel != "" { + launcherLogLevel = owningTopologyCommonSpec.LauncherLogLevel + } + + envs := []k8scorev1.EnvVar{ + { + Name: clabernetesconstants.LauncherLoggerLevelEnv, + Value: launcherLogLevel, + }, + } + + if owningTopologyCommonSpec.ContainerlabDebug { + envs = append( + envs, + k8scorev1.EnvVar{ + Name: clabernetesconstants.LauncherContainerlabDebug, + Value: clabernetesconstants.True, + }, + ) + } + + if len(owningTopologyCommonSpec.InsecureRegistries) > 0 { + envs = append( + envs, + k8scorev1.EnvVar{ + Name: clabernetesconstants.LauncherInsecureRegistries, + Value: strings.Join(owningTopologyCommonSpec.InsecureRegistries, ","), + }, + ) + } + + deployment.Spec.Template.Spec.Containers[0].Env = envs +} + +func (r *DeploymentReconciler) renderDeploymentContainerResources( + deployment *k8sappsv1.Deployment, + nodeName string, + owningTopologyCommonSpec *clabernetesapistopologyv1alpha1.TopologyCommonSpec, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, +) { + nodeResources, nodeResourcesOk := owningTopologyCommonSpec.Resources[nodeName] + if nodeResourcesOk { + deployment.Spec.Template.Spec.Containers[0].Resources = nodeResources + + return + } + + defaultResources, defaultResourcesOk := owningTopologyCommonSpec.Resources[clabernetesconstants.Default] //nolint:lll + if defaultResourcesOk { + deployment.Spec.Template.Spec.Containers[0].Resources = defaultResources + + return + } + + resources := r.configManagerGetter().GetResourcesForContainerlabKind( + clabernetesConfigs[nodeName].Topology.GetNodeKindType(nodeName), + ) + + if resources != nil { + deployment.Spec.Template.Spec.Containers[0].Resources = *resources + } +} + +// Render accepts the owning topology a mapping of clabernetes sub-topology configs and a node name +// and renders the final deployment for this node. +func (r *DeploymentReconciler) Render( + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + nodeName string, +) *k8sappsv1.Deployment { + owningTopologyName := owningTopology.GetName() + + owningTopologyCommonSpec := owningTopology.GetTopologyCommonSpec() + + configVolumeName := fmt.Sprintf("%s-config", owningTopologyName) + + deployment := r.renderDeploymentBase( + fmt.Sprintf("%s-%s", owningTopologyName, nodeName), + owningTopology.GetNamespace(), + owningTopologyName, + nodeName, + ) + + volumeMountsFromCommonSpec := r.renderDeploymentVolumes( + deployment, + nodeName, + configVolumeName, + owningTopologyName, + &owningTopologyCommonSpec, + ) + + r.renderDeploymentContainer( + deployment, + nodeName, + configVolumeName, + volumeMountsFromCommonSpec, + ) + + r.renderDeploymentContainerEnv( + deployment, + &owningTopologyCommonSpec, + ) + + r.renderDeploymentContainerResources( + deployment, + nodeName, + &owningTopologyCommonSpec, + clabernetesConfigs, + ) + + return deployment +} + +// RenderAll accepts the owning topology a mapping of clabernetes sub-topology configs and a +// list of node names and renders the final deployments for the given nodes. 
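As a rough usage sketch only: the constructor call below mirrors the unit tests further down in this patch, while `owningTopology` and `clabernetesConfigs` are placeholders for the topology custom resource and the per-node sub-topology map, not names taken from this change.

    deploymentReconciler := clabernetescontrollerstopologyreconciler.NewDeploymentReconciler(
        &claberneteslogging.FakeInstance{},   // any claberneteslogging.Instance works here
        clabernetesapistopology.Containerlab, // kind of the owning topology resource
        clabernetesconfig.GetFakeManager,     // the tests pass the fake config manager getter
    )

    // one rendered deployment per requested node, in input order
    deployments := deploymentReconciler.RenderAll(
        owningTopology,
        clabernetesConfigs,
        []string{"srl1", "srl2"},
    )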
+func (r *DeploymentReconciler) RenderAll( + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + nodeNames []string, +) []*k8sappsv1.Deployment { + deployments := make([]*k8sappsv1.Deployment, len(nodeNames)) + + for idx, nodeName := range nodeNames { + deployments[idx] = r.Render( + owningTopology, + clabernetesConfigs, + nodeName, + ) + } + + return deployments +} + +// Conforms checks if the existingDeployment conforms with the renderedDeployment. +func (r *DeploymentReconciler) Conforms( + existingDeployment, + renderedDeployment *k8sappsv1.Deployment, + expectedOwnerUID apimachinerytypes.UID, +) bool { + if !reflect.DeepEqual(existingDeployment.Spec.Replicas, renderedDeployment.Spec.Replicas) { + return false + } + + if !reflect.DeepEqual(existingDeployment.Spec.Selector, renderedDeployment.Spec.Selector) { + return false + } + + if !clabernetesutilkubernetes.ContainersEqual( + existingDeployment.Spec.Template.Spec.Containers, + renderedDeployment.Spec.Template.Spec.Containers, + ) { + return false + } + + if !reflect.DeepEqual( + existingDeployment.Spec.Template.Spec.ServiceAccountName, + renderedDeployment.Spec.Template.Spec.ServiceAccountName, + ) { + return false + } + + if !reflect.DeepEqual( + existingDeployment.Spec.Template.Spec.RestartPolicy, + renderedDeployment.Spec.Template.Spec.RestartPolicy, + ) { + return false + } + + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( + existingDeployment.ObjectMeta.Annotations, + renderedDeployment.ObjectMeta.Annotations, + ) { + return false + } + + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( + existingDeployment.ObjectMeta.Labels, + renderedDeployment.ObjectMeta.Labels, + ) { + return false + } + + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( + existingDeployment.Spec.Template.ObjectMeta.Annotations, + renderedDeployment.Spec.Template.ObjectMeta.Annotations, + ) { + return false + } + + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( + existingDeployment.Spec.Template.ObjectMeta.Labels, + renderedDeployment.Spec.Template.ObjectMeta.Labels, + ) { + return false + } + + if len(existingDeployment.ObjectMeta.OwnerReferences) != 1 { + // we should have only one owner reference, the extractor + return false + } + + if existingDeployment.ObjectMeta.OwnerReferences[0].UID != expectedOwnerUID { + // owner ref uid is not us + return false + } + + return true +} + +// DetermineNodesNeedingRestart accepts a mapping of the previously stored clabernetes +// sub-topologies and the current reconcile loops rendered topologies and returns a slice of node +// names whose deployments need restarting due to configuration changes. 
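A minimal worked example of that contract, with invented configs (`cfgA`, `cfgB`, `cfgBChanged`, and `cfgC` are placeholders): only a node whose sub-topology actually changed between reconciles is returned, and a node absent from the previous map is skipped because nothing is running for it yet.

    previous := map[string]*clabernetesutilcontainerlab.Config{"srl1": cfgA, "srl2": cfgB}
    current := map[string]*clabernetesutilcontainerlab.Config{"srl1": cfgA, "srl2": cfgBChanged, "srl3": cfgC}

    // returns []string{"srl2"} -- srl1 is unchanged, srl3 is new and therefore ignored
    needRestart := deploymentReconciler.DetermineNodesNeedingRestart(previous, current)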
+func (r *DeploymentReconciler) DetermineNodesNeedingRestart( + previousClabernetesConfigs, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, +) []string { + var nodesNeedingRestart []string + + for nodeName, nodeConfig := range currentClabernetesConfigs { + _, nodeExistedBefore := previousClabernetesConfigs[nodeName] + if !nodeExistedBefore { + continue + } + + if !reflect.DeepEqual(nodeConfig, previousClabernetesConfigs[nodeName]) { + nodesNeedingRestart = append( + nodesNeedingRestart, + nodeName, + ) + } + } + + return nodesNeedingRestart +} diff --git a/controllers/topology/reconciler/deployment_test.go b/controllers/topology/reconciler/deployment_test.go new file mode 100644 index 00000000..857a475c --- /dev/null +++ b/controllers/topology/reconciler/deployment_test.go @@ -0,0 +1,984 @@ +package reconciler_test + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + + claberneteslogging "github.com/srl-labs/clabernetes/logging" + + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" + + clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" + + clabernetesconfig "github.com/srl-labs/clabernetes/config" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + clabernetesutil "github.com/srl-labs/clabernetes/util" + k8sappsv1 "k8s.io/api/apps/v1" + k8scorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apimachinerytypes "k8s.io/apimachinery/pkg/types" +) + +const renderDeploymentTestName = "deployment/render-deployment" + +func TestResolveDeployment(t *testing.T) { + cases := []struct { + name string + ownedDeployments *k8sappsv1.DeploymentList + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config + expectedCurrent []string + expectedMissing []string + expectedExtra []*k8sappsv1.Deployment + }{ + { + name: "simple", + ownedDeployments: &k8sappsv1.DeploymentList{}, + clabernetesConfigs: nil, + expectedCurrent: nil, + expectedMissing: nil, + expectedExtra: []*k8sappsv1.Deployment{}, + }, + { + name: "missing-nodes", + ownedDeployments: &k8sappsv1.DeploymentList{}, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "node1": nil, + "node2": nil, + }, + expectedCurrent: nil, + expectedMissing: []string{"node1", "node2"}, + expectedExtra: []*k8sappsv1.Deployment{}, + }, + { + name: "extra-nodes", + ownedDeployments: &k8sappsv1.DeploymentList{ + Items: []k8sappsv1.Deployment{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-deployment-test", + Namespace: "clabernetes", + Labels: map[string]string{ + clabernetesconstants.LabelTopologyNode: "node2", + }, + }, + }, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "node1": nil, + }, + expectedCurrent: nil, + expectedMissing: nil, + expectedExtra: []*k8sappsv1.Deployment{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-deployment-test", + Namespace: "clabernetes", + Labels: map[string]string{ + clabernetesconstants.LabelTopologyNode: "node2", + }, + }, + }, + }, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewDeploymentReconciler( + 
&claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got, err := reconciler.Resolve( + testCase.ownedDeployments, + testCase.clabernetesConfigs, + nil, + ) + if err != nil { + t.Fatal(err) + } + + var gotCurrent []string + + for current := range got.Current { + gotCurrent = append(gotCurrent, current) + } + + if !clabernetesutil.StringSliceContainsAll(gotCurrent, testCase.expectedCurrent) { + clabernetestesthelper.FailOutput(t, gotCurrent, testCase.expectedCurrent) + } + + if !clabernetesutil.StringSliceContainsAll(got.Missing, testCase.expectedMissing) { + clabernetestesthelper.FailOutput(t, got.Missing, testCase.expectedMissing) + } + + if !reflect.DeepEqual(got.Extra, testCase.expectedExtra) { + clabernetestesthelper.FailOutput(t, got.Extra, testCase.expectedExtra) + } + }) + } +} + +func TestRenderDeployment(t *testing.T) { + cases := []struct { + name string + owningTopologyObject clabernetesapistopologyv1alpha1.TopologyCommonObject + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config + nodeName string + }{ + { + name: "simple", + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-deployment-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: []string{ + "21022:22/tcp", + "21023:23/tcp", + "21161:161/udp", + "33333:57400/tcp", + "60000:21/tcp", + "60001:80/tcp", + "60002:443/tcp", + "60003:830/tcp", + "60004:5000/tcp", + "60005:5900/tcp", + "60006:6030/tcp", + "60007:9339/tcp", + "60008:9340/tcp", + "60009:9559/tcp", + }, + }, + Kinds: nil, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + Image: "ghcr.io/nokia/srlinux", + }, + }, + Links: nil, + }, + Debug: false, + }, + }, + nodeName: "srl1", + }, + { + name: "containerlab-debug", + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-deployment-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{ + ContainerlabDebug: true, + }, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: []string{ + "21022:22/tcp", + "21023:23/tcp", + "21161:161/udp", + "33333:57400/tcp", + "60000:21/tcp", + "60001:80/tcp", + "60002:443/tcp", + "60003:830/tcp", + "60004:5000/tcp", + "60005:5900/tcp", + "60006:6030/tcp", + "60007:9339/tcp", + "60008:9340/tcp", + "60009:9559/tcp", + }, + }, + Kinds: nil, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + Image: "ghcr.io/nokia/srlinux", + }, + }, + Links: nil, + }, + Debug: false, + }, + }, + nodeName: 
"srl1", + }, + { + name: "launcher-log-level", + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-deployment-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{ + LauncherLogLevel: "debug", + }, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux + `, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: []string{ + "21022:22/tcp", + "21023:23/tcp", + "21161:161/udp", + "33333:57400/tcp", + "60000:21/tcp", + "60001:80/tcp", + "60002:443/tcp", + "60003:830/tcp", + "60004:5000/tcp", + "60005:5900/tcp", + "60006:6030/tcp", + "60007:9339/tcp", + "60008:9340/tcp", + "60009:9559/tcp", + }, + }, + Kinds: nil, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + Image: "ghcr.io/nokia/srlinux", + }, + }, + Links: nil, + }, + Debug: false, + }, + }, + nodeName: "srl1", + }, + { + name: "insecure-registries", + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-deployment-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{ + InsecureRegistries: []string{"1.2.3.4", "potato.com"}, + }, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux + `, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: []string{ + "21022:22/tcp", + "21023:23/tcp", + "21161:161/udp", + "33333:57400/tcp", + "60000:21/tcp", + "60001:80/tcp", + "60002:443/tcp", + "60003:830/tcp", + "60004:5000/tcp", + "60005:5900/tcp", + "60006:6030/tcp", + "60007:9339/tcp", + "60008:9340/tcp", + "60009:9559/tcp", + }, + }, + Kinds: nil, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + Image: "ghcr.io/nokia/srlinux", + }, + }, + Links: nil, + }, + Debug: false, + }, + }, + nodeName: "srl1", + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewDeploymentReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got := reconciler.Render( + testCase.owningTopologyObject, + testCase.clabernetesConfigs, + testCase.nodeName, + ) + + if *clabernetestesthelper.Update { + clabernetestesthelper.WriteTestFixtureJSON( + t, + fmt.Sprintf("golden/%s/%s.json", renderDeploymentTestName, testCase.name), + got, + ) + } + + var want k8sappsv1.Deployment + + err := json.Unmarshal( + clabernetestesthelper.ReadTestFixtureFile( + t, + fmt.Sprintf("golden/%s/%s.json", renderDeploymentTestName, testCase.name), + ), + &want, + ) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(got.Annotations, want.Annotations) { + clabernetestesthelper.FailOutput(t, got.Annotations, 
want.Annotations) + } + if !reflect.DeepEqual(got.Labels, want.Labels) { + clabernetestesthelper.FailOutput(t, got.Labels, want.Labels) + } + if !reflect.DeepEqual(got.Spec, want.Spec) { + clabernetestesthelper.FailOutput(t, got.Spec, want.Spec) + } + }) + } +} + +func TestDeploymentConforms(t *testing.T) { + cases := []struct { + name string + existing *k8sappsv1.Deployment + rendered *k8sappsv1.Deployment + ownerUID apimachinerytypes.UID + conforms bool + }{ + { + name: "simple", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + { + name: "bad-replicas", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Replicas: clabernetesutil.ToPointer(int32(100)), + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Replicas: clabernetesutil.ToPointer(int32(1)), + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-selector", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "something": "something", + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "something": "different", + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-containers", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{}, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + Spec: k8scorev1.PodSpec{ + Containers: []k8scorev1.Container{ + { + Name: "some-container", + }, + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-service-account", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + Spec: k8scorev1.PodSpec{ + ServiceAccountName: "something-else", + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + Spec: k8scorev1.PodSpec{ + ServiceAccountName: "default", + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-restart-policy", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + Spec: k8scorev1.PodSpec{ + RestartPolicy: "Never", + }, + }, + }, + }, + rendered: 
&k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + Spec: k8scorev1.PodSpec{ + RestartPolicy: "Always", + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + + // object meta annotations + + { + name: "missing-clabernetes-global-annotations", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelse": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-annotations-wrong-value", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-annotations-ok", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelseentirely": "thisisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + + // object meta labels + + { + name: "missing-clabernetes-global-annotations", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelse": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-labels-wrong-value", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-labels-ok", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelseentirely": "thisisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + + // template object meta annotations + + { + name: 
"missing-clabernetes-global-annotations", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelse": "xyz", + }, + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-annotations-wrong-value", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "xyz", + }, + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-annotations-ok", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelseentirely": "thisisok", + }, + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + + // template object meta labels + + { + name: "missing-clabernetes-global-labels", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelse": "xyz", + }, + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-labels-wrong-value", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": "xyz", + }, + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "user-provided-global-label": 
"expected-value", + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-labels-ok", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8sappsv1.DeploymentSpec{ + Template: k8scorev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelseentirely": "thisisok", + }, + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + + { + name: "bad-owner", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "multiple-owner", + existing: &k8sappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8sappsv1.Deployment{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewDeploymentReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + actual := reconciler.Conforms( + testCase.existing, + testCase.rendered, + testCase.ownerUID, + ) + if actual != testCase.conforms { + clabernetestesthelper.FailOutput(t, testCase.existing, testCase.rendered) + } + }) + } +} diff --git a/controllers/topology/reconciler/reconciler.go b/controllers/topology/reconciler/reconciler.go new file mode 100644 index 00000000..26dfcc2b --- /dev/null +++ b/controllers/topology/reconciler/reconciler.go @@ -0,0 +1,547 @@ +package reconciler + +import ( + "context" + "fmt" + "slices" + "time" + + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + k8sappsv1 "k8s.io/api/apps/v1" + + ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + clabernetesconfig "github.com/srl-labs/clabernetes/config" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + claberneteslogging "github.com/srl-labs/clabernetes/logging" + clabernetesutil "github.com/srl-labs/clabernetes/util" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + "gopkg.in/yaml.v3" + k8scorev1 "k8s.io/api/core/v1" + apimachineryerrors "k8s.io/apimachinery/pkg/api/errors" + apimachinerytypes "k8s.io/apimachinery/pkg/types" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimereconcile "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ResourceListerFunc represents a function that can list the objects that a topology controller +// is responsible for. +type ResourceListerFunc func( + ctx context.Context, + client ctrlruntimeclient.Client, +) ([]ctrlruntimeclient.Object, error) + +// NewReconciler creates a new generic Reconciler (TopologyReconciler). 
+func NewReconciler( + log claberneteslogging.Instance, + client ctrlruntimeclient.Client, + owningTopologyKind string, + resourceLister ResourceListerFunc, + configManagerGetter clabernetesconfig.ManagerGetterFunc, +) *Reconciler { + return &Reconciler{ + Log: log, + Client: client, + ResourceKind: owningTopologyKind, + ResourceLister: resourceLister, + + configMapReconciler: NewConfigMapReconciler( + log, + owningTopologyKind, + configManagerGetter, + ), + deploymentReconciler: NewDeploymentReconciler( + log, + owningTopologyKind, + configManagerGetter, + ), + serviceFabricReconciler: NewServiceFabricReconciler( + log, + owningTopologyKind, + configManagerGetter, + ), + serviceExposeReconciler: NewServiceExposeReconciler( + log, + owningTopologyKind, + configManagerGetter, + ), + } +} + +// Reconciler (TopologyReconciler) is the base clabernetes topology reconciler that is embedded in +// all clabernetes topology controllers, it provides common methods for reconciling the +// common/standard resources that represent a clabernetes object (configmap, deployments, +// services, etc.). +type Reconciler struct { + Log claberneteslogging.Instance + Client ctrlruntimeclient.Client + ResourceKind string + ResourceLister ResourceListerFunc + + configMapReconciler *ConfigMapReconciler + deploymentReconciler *DeploymentReconciler + serviceFabricReconciler *ServiceFabricReconciler + serviceExposeReconciler *ServiceExposeReconciler +} + +// ReconcileConfigMap reconciles the primary configmap containing clabernetes configs and tunnel +// information. +func (r *Reconciler) ReconcileConfigMap( + ctx context.Context, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + tunnels map[string][]*clabernetesapistopologyv1alpha1.Tunnel, +) error { + namespacedName := apimachinerytypes.NamespacedName{ + Namespace: owningTopology.GetNamespace(), + Name: owningTopology.GetName(), + } + + renderedConfigMap, err := r.configMapReconciler.Render( + namespacedName, + clabernetesConfigs, + tunnels, + ) + if err != nil { + return err + } + + existingConfigMap := &k8scorev1.ConfigMap{} + + err = r.Client.Get( + ctx, + namespacedName, + existingConfigMap, + ) + if err != nil { + if apimachineryerrors.IsNotFound(err) { + return r.createObj( + ctx, + owningTopology, + renderedConfigMap, + clabernetesconstants.KubernetesConfigMap, + ) + } + + return err + } + + if r.configMapReconciler.Conforms( + existingConfigMap, + renderedConfigMap, + owningTopology.GetUID(), + ) { + return nil + } + + return r.updateObj(ctx, renderedConfigMap, clabernetesconstants.KubernetesConfigMap) +} + +func (r *Reconciler) reconcileDeploymentsHandleRestarts( + ctx context.Context, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + previousClabernetesConfigs, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + deployments *clabernetesutil.ObjectDiffer[*k8sappsv1.Deployment], +) error { + r.Log.Info("determining nodes needing restart") + + nodesNeedingRestart := r.deploymentReconciler.DetermineNodesNeedingRestart( + previousClabernetesConfigs, + currentClabernetesConfigs, + ) + if len(nodesNeedingRestart) == 0 { + return nil + } + + for _, nodeName := range nodesNeedingRestart { + if slices.Contains(deployments.Missing, nodeName) { + // is a new node, don't restart, we'll deploy it soon + continue + } + + r.Log.Infof( + "restarting the nodes '%s' as configurations have changed", + nodesNeedingRestart, + ) + + 
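        // Editor's note (not part of this change): the restart performed below mimics
        // `kubectl rollout restart` -- bumping the kubectl.kubernetes.io/restartedAt annotation
        // on the pod template forces a fresh rollout of the deployment without changing
        // anything else in its spec.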
deploymentName := fmt.Sprintf("%s-%s", owningTopology.GetName(), nodeName) + + nodeDeployment := &k8sappsv1.Deployment{} + + err := r.getObj( + ctx, + nodeDeployment, + apimachinerytypes.NamespacedName{ + Namespace: owningTopology.GetNamespace(), + Name: deploymentName, + }, + clabernetesconstants.KubernetesDeployment, + ) + if err != nil { + if apimachineryerrors.IsNotFound(err) { + r.Log.Warnf( + "could not find deployment '%s', cannot restart after config change,"+ + " this should not happen", + deploymentName, + ) + + return nil + } + + return err + } + + if nodeDeployment.Spec.Template.ObjectMeta.Annotations == nil { + nodeDeployment.Spec.Template.ObjectMeta.Annotations = map[string]string{} + } + + now := time.Now().Format(time.RFC3339) + + nodeDeployment.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = now //nolint:lll + + err = r.updateObj(ctx, nodeDeployment, clabernetesconstants.KubernetesDeployment) + if err != nil { + return err + } + } + + return nil +} + +// ReconcileDeployments reconciles the deployments that make up a clabernetes Topology. +func (r *Reconciler) ReconcileDeployments( + ctx context.Context, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + previousClabernetesConfigs, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, +) error { + deployments, err := reconcileResolve( + ctx, + r, + &k8sappsv1.Deployment{}, + &k8sappsv1.DeploymentList{}, + clabernetesconstants.KubernetesDeployment, + owningTopology, + currentClabernetesConfigs, + r.deploymentReconciler.Resolve, + ) + if err != nil { + return err + } + + r.Log.Info("pruning extraneous deployments") + + for _, extraDeployment := range deployments.Extra { + err = r.deleteObj(ctx, extraDeployment, clabernetesconstants.KubernetesDeployment) + if err != nil { + return err + } + } + + r.Log.Info("creating missing deployments") + + renderedMissingDeployments := r.deploymentReconciler.RenderAll( + owningTopology, + currentClabernetesConfigs, + deployments.Missing, + ) + + for _, renderedMissingDeployment := range renderedMissingDeployments { + err = r.createObj( + ctx, + owningTopology, + renderedMissingDeployment, + clabernetesconstants.KubernetesDeployment, + ) + if err != nil { + return err + } + } + + r.Log.Info("enforcing desired state on existing deployments") + + for existingCurrentDeploymentNodeName, existingCurrentDeployment := range deployments.Current { + renderedCurrentDeployment := r.deploymentReconciler.Render( + owningTopology, + currentClabernetesConfigs, + existingCurrentDeploymentNodeName, + ) + + err = ctrlruntimeutil.SetOwnerReference( + owningTopology, + renderedCurrentDeployment, + r.Client.Scheme(), + ) + if err != nil { + return err + } + + if !r.deploymentReconciler.Conforms( + existingCurrentDeployment, + renderedCurrentDeployment, + owningTopology.GetUID(), + ) { + err = r.updateObj( + ctx, + renderedCurrentDeployment, + clabernetesconstants.KubernetesDeployment, + ) + if err != nil { + return err + } + } + } + + return r.reconcileDeploymentsHandleRestarts( + ctx, + owningTopology, + previousClabernetesConfigs, + currentClabernetesConfigs, + deployments, + ) +} + +// ReconcileServiceFabric reconciles the service used for "fabric" (inter node) connectivity. 
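For orientation, a hedged illustration of what this fabric service appears to back: the VXLAN tunnel entries written into the topology configmap (see the configmap tests above) point at an in-cluster DNS name that, by all appearances, resolves to the per-node fabric service, e.g.:

    &clabernetesapistopologyv1alpha1.Tunnel{
        ID:             1,
        LocalNodeName:  "srl1",
        RemoteName:     "unitTest-srl2-vx.clabernetes.svc.cluster.local", // srl2's fabric service name
        RemoteNodeName: "srl2",
        LocalLinkName:  "e1-1",
        RemoteLinkName: "e1-1",
    }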
+func (r *Reconciler) ReconcileServiceFabric( + ctx context.Context, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, +) error { + serviceTypeName := fmt.Sprintf("fabric %s", clabernetesconstants.KubernetesService) + + services, err := reconcileResolve( + ctx, + r, + &k8scorev1.Service{}, + &k8scorev1.ServiceList{}, + serviceTypeName, + owningTopology, + currentClabernetesConfigs, + r.serviceExposeReconciler.Resolve, + ) + if err != nil { + return err + } + + r.Log.Info("pruning extraneous fabric services") + + for _, extraService := range services.Extra { + err = r.deleteObj( + ctx, + extraService, + serviceTypeName, + ) + if err != nil { + return err + } + } + + r.Log.Info("creating missing fabric services") + + renderedMissingServices := r.serviceFabricReconciler.RenderAll( + owningTopology, + services.Missing, + ) + + for _, renderedMissingService := range renderedMissingServices { + err = r.createObj( + ctx, + owningTopology, + renderedMissingService, + serviceTypeName, + ) + if err != nil { + return err + } + } + + r.Log.Info("enforcing desired state on fabric services") + + for existingCurrentServiceNodeName, existingCurrentService := range services.Current { + renderedCurrentService := r.serviceFabricReconciler.Render( + owningTopology, + existingCurrentServiceNodeName, + ) + + err = ctrlruntimeutil.SetOwnerReference( + owningTopology, + renderedCurrentService, + r.Client.Scheme(), + ) + if err != nil { + return err + } + + if !r.serviceFabricReconciler.Conforms( + existingCurrentService, + renderedCurrentService, + owningTopology.GetUID(), + ) { + err = r.updateObj( + ctx, + renderedCurrentService, + serviceTypeName, + ) + if err != nil { + return err + } + } + } + + return nil +} + +// ReconcileServicesExpose reconciles the service(s) used for exposing nodes. 
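A sketch of how a calling controller might consume the returned flag; the surrounding reconcile plumbing (`topologyReconciler`, `mgrClient`, `owningTopology`) is assumed here rather than taken from this change.

    shouldUpdate, err := topologyReconciler.ReconcileServicesExpose(ctx, owningTopology, clabernetesConfigs)
    if err != nil {
        return ctrlruntime.Result{}, err
    }

    if shouldUpdate {
        // the expose reconciler mutated the topology status (exposed ports and their hash),
        // so persist the status back to the cluster
        if err = mgrClient.Status().Update(ctx, owningTopology); err != nil {
            return ctrlruntime.Result{}, err
        }
    }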
+func (r *Reconciler) ReconcileServicesExpose( + ctx context.Context, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, +) (bool, error) { + serviceTypeName := fmt.Sprintf("expose %s", clabernetesconstants.KubernetesService) + + var shouldUpdate bool + + owningTopologyStatus := owningTopology.GetTopologyStatus() + + if owningTopologyStatus.NodeExposedPorts == nil { + owningTopologyStatus.NodeExposedPorts = map[string]*clabernetesapistopologyv1alpha1.ExposedPorts{} //nolint:lll + + shouldUpdate = true + } + + services, err := reconcileResolve( + ctx, + r, + &k8scorev1.Service{}, + &k8scorev1.ServiceList{}, + serviceTypeName, + owningTopology, + currentClabernetesConfigs, + r.serviceExposeReconciler.Resolve, + ) + if err != nil { + return shouldUpdate, err + } + + r.Log.Info("pruning extraneous services") + + for _, extraDeployment := range services.Extra { + err = r.deleteObj( + ctx, + extraDeployment, + serviceTypeName, + ) + if err != nil { + return shouldUpdate, err + } + } + + r.Log.Info("creating missing services") + + renderedMissingServices := r.serviceExposeReconciler.RenderAll( + owningTopology, + &owningTopologyStatus, + currentClabernetesConfigs, + services.Missing, + ) + + for _, renderedMissingService := range renderedMissingServices { + err = r.createObj( + ctx, + owningTopology, + renderedMissingService, + serviceTypeName, + ) + if err != nil { + return shouldUpdate, err + } + } + + for existingCurrentServiceNodeName, existingCurrentService := range services.Current { + renderedCurrentService := r.serviceExposeReconciler.Render( + owningTopology, + &owningTopologyStatus, + currentClabernetesConfigs, + existingCurrentServiceNodeName, + ) + + if len(existingCurrentService.Status.LoadBalancer.Ingress) == 1 { + // can/would this ever be more than 1? i dunno? + address := existingCurrentService.Status.LoadBalancer.Ingress[0].IP + if address != "" { + owningTopologyStatus.NodeExposedPorts[existingCurrentServiceNodeName].LoadBalancerAddress = address //nolint:lll + } + } + + err = ctrlruntimeutil.SetOwnerReference( + owningTopology, + renderedCurrentService, + r.Client.Scheme(), + ) + if err != nil { + return shouldUpdate, err + } + + if !r.serviceExposeReconciler.Conforms( + existingCurrentService, + renderedCurrentService, + owningTopology.GetUID(), + ) { + err = r.updateObj( + ctx, + renderedCurrentService, + serviceTypeName, + ) + if err != nil { + return shouldUpdate, err + } + } + } + + nodeExposedPortsBytes, err := yaml.Marshal(owningTopologyStatus.NodeExposedPorts) + if err != nil { + return shouldUpdate, err + } + + newNodeExposedPortsHash := clabernetesutil.HashBytes(nodeExposedPortsBytes) + + if owningTopologyStatus.NodeExposedPortsHash != newNodeExposedPortsHash { + owningTopologyStatus.NodeExposedPortsHash = newNodeExposedPortsHash + + owningTopology.SetTopologyStatus(owningTopologyStatus) + + shouldUpdate = true + } + + return shouldUpdate, nil +} + +// EnqueueForAll enqueues a reconcile for kinds the Reconciler represents. This is probably not very +// efficient/good but we should have low volume and we're using the cached ctrlruntime client so its +// probably ok :). 
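A map function with this signature slots into a controller-runtime watch; the sketch below assumes controller-runtime v0.15+ (where handler.MapFunc accepts a context), and the watched object, the `ctrlruntimebuilder`/`ctrlruntimehandler` aliases, and the builder variables are invented purely for illustration.

    ctrlruntimebuilder.ControllerManagedBy(mgr).
        For(&clabernetesapistopologyv1alpha1.Containerlab{}).
        Watches(
            &k8scorev1.ConfigMap{}, // e.g. re-enqueue every topology when some shared input changes
            ctrlruntimehandler.EnqueueRequestsFromMapFunc(topologyReconciler.EnqueueForAll),
        ).
        Complete(topologyController)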
+func (r *Reconciler) EnqueueForAll( + ctx context.Context, + _ ctrlruntimeclient.Object, +) []ctrlruntimereconcile.Request { + objList, err := r.ResourceLister(ctx, r.Client) + if err != nil { + r.Log.Criticalf("failed listing resource objects in EnqueueForAll, err: %s", err) + + return nil + } + + requests := make([]ctrlruntimereconcile.Request, len(objList)) + + for idx := range objList { + requests[idx] = ctrlruntimereconcile.Request{ + NamespacedName: apimachinerytypes.NamespacedName{ + Namespace: objList[idx].GetNamespace(), + Name: objList[idx].GetName(), + }, + } + } + + return requests +} diff --git a/controllers/topology/reconciler/reconciler_test.go b/controllers/topology/reconciler/reconciler_test.go new file mode 100644 index 00000000..4020b9ec --- /dev/null +++ b/controllers/topology/reconciler/reconciler_test.go @@ -0,0 +1,14 @@ +package reconciler_test + +import ( + "os" + "testing" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" +) + +func TestMain(m *testing.M) { + clabernetestesthelper.Flags() + + os.Exit(m.Run()) +} diff --git a/controllers/topology/reconciler/resolve.go b/controllers/topology/reconciler/resolve.go new file mode 100644 index 00000000..4b401c97 --- /dev/null +++ b/controllers/topology/reconciler/resolve.go @@ -0,0 +1,66 @@ +package reconciler + +import ( + "context" + + clabernetesutil "github.com/srl-labs/clabernetes/util" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +func reconcileResolve[T ctrlruntimeclient.Object, TL ctrlruntimeclient.ObjectList]( + ctx context.Context, + reconciler *Reconciler, + ownedType T, + ownedTypeListing TL, + ownedTypeName string, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + resolveFunc func( + ownedObject TL, + currentClabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + ) (*clabernetesutil.ObjectDiffer[T], error), +) (*clabernetesutil.ObjectDiffer[T], error) { + // strictly passed for typing reasons + _ = ownedType + + err := reconciler.Client.List( + ctx, + ownedTypeListing, + ctrlruntimeclient.InNamespace(owningTopology.GetNamespace()), + ctrlruntimeclient.MatchingLabels{ + clabernetesconstants.LabelTopologyOwner: owningTopology.GetName(), + }, + ) + if err != nil { + reconciler.Log.Criticalf("failed fetching owned deployments, error: '%s'", err) + + return nil, err + } + + resolved, err := resolveFunc(ownedTypeListing, currentClabernetesConfigs, owningTopology) + if err != nil { + reconciler.Log.Criticalf("failed resolving owned deployments, error: '%s'", err) + + return nil, err + } + + reconciler.Log.Debugf( + "%ss are missing for the following nodes: %s", + ownedTypeName, + resolved.Missing, + ) + + reconciler.Log.Debugf( + "extraneous %ss exist for following nodes: %s", + ownedTypeName, + resolved.Extra, + ) + + return resolved, nil +} diff --git a/controllers/topology/service.go b/controllers/topology/reconciler/service.go similarity index 72% rename from controllers/topology/service.go rename to controllers/topology/reconciler/service.go index 1dec01e9..1de48e11 100644 --- a/controllers/topology/service.go +++ 
b/controllers/topology/reconciler/service.go @@ -1,12 +1,9 @@ -package topology +package reconciler import ( "reflect" - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" - - clabernetesconstants "github.com/srl-labs/clabernetes/constants" - clabernetesutil "github.com/srl-labs/clabernetes/util" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" k8scorev1 "k8s.io/api/core/v1" apimachinerytypes "k8s.io/apimachinery/pkg/types" @@ -55,14 +52,14 @@ func ServiceConforms( } } - if !clabernetescontrollers.AnnotationsOrLabelsConform( + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( existingService.ObjectMeta.Annotations, renderedService.ObjectMeta.Annotations, ) { return false } - if !clabernetescontrollers.AnnotationsOrLabelsConform( + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( existingService.ObjectMeta.Labels, renderedService.ObjectMeta.Labels, ) { @@ -81,12 +78,3 @@ func ServiceConforms( return true } - -// GetServiceDNSSuffix returns the default "svc.cluster.local" dns suffix, or the user's provided -// override value. -func GetServiceDNSSuffix() string { - return clabernetesutil.GetEnvStrOrDefault( - clabernetesconstants.InClusterDNSSuffixEnv, - clabernetesconstants.DefaultInClusterDNSSuffix, - ) -} diff --git a/controllers/topology/reconciler/service_test.go b/controllers/topology/reconciler/service_test.go new file mode 100644 index 00000000..53d3680b --- /dev/null +++ b/controllers/topology/reconciler/service_test.go @@ -0,0 +1,469 @@ +package reconciler_test + +import ( + "testing" + + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" + + "k8s.io/apimachinery/pkg/util/intstr" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + k8scorev1 "k8s.io/api/core/v1" + apimachinerytypes "k8s.io/apimachinery/pkg/types" +) + +func TestServiceConforms(t *testing.T) { + cases := []struct { + name string + existing *k8scorev1.Service + rendered *k8scorev1.Service + ownerUID apimachinerytypes.UID + conforms bool + }{ + { + name: "simple", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + { + name: "conforms", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + "someextraannotations": "extraisok", + }, + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + "clabernetes/app": "clabernetes", + "someextralabel": "extraisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + { + Name: "ssh-for-reasons", + Protocol: "TCP", + Port: 22, + TargetPort: intstr.IntOrString{ + IntVal: 22, + }, + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + Labels: map[string]string{ + "user-provided-global-label": "expected-value", + "clabernetes/app": "clabernetes", + }, 
+ }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + { + Name: "ssh-for-reasons", + Protocol: "TCP", + Port: 22, + TargetPort: intstr.IntOrString{ + IntVal: 22, + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + { + name: "bad-selector", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8scorev1.ServiceSpec{ + Selector: map[string]string{ + "something": "something", + }, + }, + }, + rendered: &k8scorev1.Service{ + Spec: k8scorev1.ServiceSpec{ + Selector: map[string]string{ + "different": "different", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-type", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8scorev1.ServiceSpec{ + Type: "ClusterIP", + }, + }, + rendered: &k8scorev1.Service{ + Spec: k8scorev1.ServiceSpec{ + Type: "NodePort", + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "bad-port-number", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 99, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "no-matching-port-name", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "something-else", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "port-target-mismatch", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 23, + }, + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "telnet-cuz-sekurity", + Protocol: "TCP", + Port: 23, + TargetPort: intstr.IntOrString{ + IntVal: 99, + }, + }, + }, + }, + }, + ownerUID: 
apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "missing-clabernetes-global-annotations", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelse": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-global-annotations-wrong-value", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "user-provided-global-annotation": "expected-value", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-annotations-ok", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "somethingelseentirely": "thisisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + { + name: "missing-clabernetes-labels", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelse": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "clabernetes/app": "clabernetes", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "clabernetes-labels-wrong-value", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "clabernetes/app": "xyz", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "clabernetes/app": "clabernetes", + }, + }, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "extra-labels-ok", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "somethingelseentirely": "thisisok", + }, + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{}, + }, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: true, + }, + { + name: "bad-owner", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + { + name: "multiple-owner", + existing: &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + 
OwnerReferences: []metav1.OwnerReference{ + { + UID: apimachinerytypes.UID("evil-imposter"), + }, + { + UID: apimachinerytypes.UID("clabernetes-testing"), + }, + }, + }, + }, + rendered: &k8scorev1.Service{}, + ownerUID: apimachinerytypes.UID("clabernetes-testing"), + conforms: false, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + actual := clabernetescontrollerstopologyreconciler.ServiceConforms( + testCase.existing, + testCase.rendered, + testCase.ownerUID, + ) + if actual != testCase.conforms { + clabernetestesthelper.FailOutput(t, testCase.existing, testCase.rendered) + } + }) + } +} diff --git a/controllers/topology/reconciler/serviceexpose.go b/controllers/topology/reconciler/serviceexpose.go new file mode 100644 index 00000000..ed9d1db7 --- /dev/null +++ b/controllers/topology/reconciler/serviceexpose.go @@ -0,0 +1,285 @@ +package reconciler + +import ( + "fmt" + "sort" + "strings" + + claberneteslogging "github.com/srl-labs/clabernetes/logging" + + clabernetesutil "github.com/srl-labs/clabernetes/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + claberneteserrors "github.com/srl-labs/clabernetes/errors" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + k8scorev1 "k8s.io/api/core/v1" + apimachinerytypes "k8s.io/apimachinery/pkg/types" +) + +// NewServiceExposeReconciler returns an instance of ServiceExposeReconciler. +func NewServiceExposeReconciler( + log claberneteslogging.Instance, + owningTopologyKind string, + configManagerGetter clabernetesconfig.ManagerGetterFunc, +) *ServiceExposeReconciler { + return &ServiceExposeReconciler{ + log: log, + owningTopologyKind: owningTopologyKind, + configManagerGetter: configManagerGetter, + } +} + +// ServiceExposeReconciler is a subcomponent of the "TopologyReconciler" but is exposed for testing +// purposes. This is the component responsible for rendering/validating the "expose" service for a +// clabernetes topology resource. +type ServiceExposeReconciler struct { + log claberneteslogging.Instance + owningTopologyKind string + configManagerGetter clabernetesconfig.ManagerGetterFunc +} + +// Resolve accepts a mapping of clabernetes configs and a list of services that are -- by owner +// reference and/or labels -- associated with the topology. It returns a ObjectDiffer object +// that contains the missing, extra, and current services for the topology. 
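Resolve hands back a clabernetesutil.ObjectDiffer that splits the owned services into Current (keyed by node name), Missing (expected nodes with no service yet), and Extra (services whose node is no longer expected). A minimal sketch of how such a differ behaves is below; the field and method names mirror how the type is used in this diff, but the internals of SetMissing/SetExtra are assumptions.

```go
package main

import "fmt"

// objectDiffer is a pared-down sketch of the ObjectDiffer the reconcilers use.
type objectDiffer[T any] struct {
	Current map[string]T
	Missing []string
	Extra   []T
}

// SetMissing records expected node names that have no current object.
func (d *objectDiffer[T]) SetMissing(expected []string) {
	for _, name := range expected {
		if _, ok := d.Current[name]; !ok {
			d.Missing = append(d.Missing, name)
		}
	}
}

// SetExtra records current objects whose node name is no longer expected.
func (d *objectDiffer[T]) SetExtra(expected []string) {
	expectedSet := make(map[string]struct{}, len(expected))
	for _, name := range expected {
		expectedSet[name] = struct{}{}
	}

	for name, obj := range d.Current {
		if _, ok := expectedSet[name]; !ok {
			d.Extra = append(d.Extra, obj)
		}
	}
}

func main() {
	d := &objectDiffer[string]{
		Current: map[string]string{"srl1": "svc-srl1", "old-node": "svc-old"},
	}

	expected := []string{"srl1", "srl2"}

	d.SetMissing(expected) // -> [srl2]
	d.SetExtra(expected)   // -> [svc-old]

	fmt.Println(d.Missing, d.Extra)
}
```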
+func (r *ServiceExposeReconciler) Resolve( + ownedServices *k8scorev1.ServiceList, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, +) (*clabernetesutil.ObjectDiffer[*k8scorev1.Service], error) { + services := &clabernetesutil.ObjectDiffer[*k8scorev1.Service]{ + Current: map[string]*k8scorev1.Service{}, + } + + for i := range ownedServices.Items { + labels := ownedServices.Items[i].Labels + + if labels == nil { + return nil, fmt.Errorf( + "%w: labels are nil, but we expect to see topology owner label here", + claberneteserrors.ErrInvalidData, + ) + } + + topologyServiceType := labels[clabernetesconstants.LabelTopologyServiceType] + + if topologyServiceType != clabernetesconstants.TopologyServiceTypeExpose { + // not the kind of service we're looking for here, we only care about the services + // used for exposing nodes here. + continue + } + + nodeName, ok := labels[clabernetesconstants.LabelTopologyNode] + if !ok || nodeName == "" { + return nil, fmt.Errorf( + "%w: topology node label is missing or empty", + claberneteserrors.ErrInvalidData, + ) + } + + services.Current[nodeName] = &ownedServices.Items[i] + } + + exposedNodes := make([]string, 0) + + disableAutoExpose := owningTopology.GetTopologyCommonSpec().DisableAutoExpose + + for nodeName, nodeData := range clabernetesConfigs { + // if disable auto expose is true *and* there are no ports defined for the node *and* + // there are no default ports defined for the topology we can skip the node from an expose + // perspective. + if disableAutoExpose && + len(nodeData.Topology.Nodes[nodeName].Ports) == 0 && + len(nodeData.Topology.Defaults.Ports) == 0 { + continue + } + + exposedNodes = append(exposedNodes, nodeName) + } + + services.SetMissing(exposedNodes) + services.SetExtra(exposedNodes) + + return services, nil +} + +func (r *ServiceExposeReconciler) renderServiceBase( + name, + namespace, + owningTopologyName, + nodeName string, +) *k8scorev1.Service { + annotations, globalLabels := r.configManagerGetter().GetAllMetadata() + + deploymentName := fmt.Sprintf("%s-%s", owningTopologyName, nodeName) + + selectorLabels := map[string]string{ + clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, + clabernetesconstants.LabelName: deploymentName, + clabernetesconstants.LabelTopologyOwner: owningTopologyName, + clabernetesconstants.LabelTopologyNode: nodeName, + } + + labels := map[string]string{ + clabernetesconstants.LabelTopologyKind: r.owningTopologyKind, + clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeExpose, //nolint:lll + + } + + for k, v := range selectorLabels { + labels[k] = v + } + + for k, v := range globalLabels { + labels[k] = v + } + + return &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + Labels: labels, + }, + Spec: k8scorev1.ServiceSpec{ + Selector: selectorLabels, + Type: clabernetesconstants.KubernetesServiceLoadBalancerType, + }, + } +} + +func (r *ServiceExposeReconciler) parseContainerlabTopologyPortsSection( + portDefinition string, +) (bool, *k8scorev1.ServicePort) { + typedPort, err := clabernetesutilcontainerlab.ProcessPortDefinition(portDefinition) + if err != nil { + r.log.Warnf("skipping port due to the following error: %s", err) + + return true, nil + } + + return false, &k8scorev1.ServicePort{ + Name: fmt.Sprintf( + "port-%d-%s", typedPort.DestinationPort, strings.ToLower(typedPort.Protocol), + ), 
+ Protocol: k8scorev1.Protocol(typedPort.Protocol), + Port: int32(typedPort.DestinationPort), + TargetPort: intstr.IntOrString{ + IntVal: int32(typedPort.ExposePort), + }, + } +} + +func (r *ServiceExposeReconciler) renderServicePorts( + service *k8scorev1.Service, + owningTopologyStatus *clabernetesapistopologyv1alpha1.TopologyStatus, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + nodeName string, +) { + owningTopologyStatus.NodeExposedPorts[nodeName] = &clabernetesapistopologyv1alpha1.ExposedPorts{ + TCPPorts: make([]int, 0), + UDPPorts: make([]int, 0), + } + + ports := make([]k8scorev1.ServicePort, 0) + + // for actual containerlab configs we copy the users given defaults into each "sub topology" -- + // so in the case of containerlab we want to make sure we also iterate over the "default" or + // topology wide ports that were specified. in this process we dont want to duplicate things, so + // we use a simple set implementation to make sure we aren't doubling up on any port + // definitions. + allContainerlabPorts := clabernetesutil.NewStringSet() + + allContainerlabPorts.Extend(clabernetesConfigs[nodeName].Topology.Nodes[nodeName].Ports) + + allContainerlabPorts.Extend(clabernetesConfigs[nodeName].Topology.Defaults.Ports) + + allContainerlabPortsItems := allContainerlabPorts.Items() + sort.Strings(allContainerlabPortsItems) + + for _, portDefinition := range allContainerlabPortsItems { + shouldSkip, port := r.parseContainerlabTopologyPortsSection(portDefinition) + + if shouldSkip { + continue + } + + ports = append(ports, *port) + + // dont forget to update the exposed ports status bits + if port.Protocol == clabernetesconstants.TCP { + owningTopologyStatus.NodeExposedPorts[nodeName].TCPPorts = append( + owningTopologyStatus.NodeExposedPorts[nodeName].TCPPorts, + int(port.Port), + ) + } else { + owningTopologyStatus.NodeExposedPorts[nodeName].UDPPorts = append( + owningTopologyStatus.NodeExposedPorts[nodeName].UDPPorts, + int(port.Port), + ) + } + } + + service.Spec.Ports = ports +} + +// Render accepts the owning topology a mapping of clabernetes sub-topology configs and a node name +// and renders the final expose service for this node. +func (r *ServiceExposeReconciler) Render( + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + owningTopologyStatus *clabernetesapistopologyv1alpha1.TopologyStatus, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + nodeName string, +) *k8scorev1.Service { + owningTopologyName := owningTopology.GetName() + + service := r.renderServiceBase( + fmt.Sprintf("%s-%s", owningTopologyName, nodeName), + owningTopology.GetNamespace(), + owningTopologyName, + nodeName, + ) + + r.renderServicePorts( + service, + owningTopologyStatus, + clabernetesConfigs, + nodeName, + ) + + return service +} + +// RenderAll accepts the owning topology a mapping of clabernetes sub-topology configs and a +// list of node names and renders the final expose services for the given nodes. 
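The port handling above maps a containerlab port definition such as "21022:22/tcp" to a ServicePort whose Port is the node-side destination port and whose TargetPort is the host-side expose port (the serviceexpose golden fixture shows port 22, targetPort 21022, name "port-22-tcp"). Here is a sketch of that mapping; the inline parsing is an assumption of what clabernetesutilcontainerlab.ProcessPortDefinition returns, inferred from the fixtures.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	k8scorev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// toServicePort turns an "exposePort:destinationPort/proto" string into the
// ServicePort shape the expose reconciler renders.
func toServicePort(definition string) (k8scorev1.ServicePort, error) {
	proto := "tcp"

	if idx := strings.LastIndex(definition, "/"); idx != -1 {
		proto = definition[idx+1:]
		definition = definition[:idx]
	}

	parts := strings.Split(definition, ":")
	if len(parts) != 2 {
		return k8scorev1.ServicePort{}, fmt.Errorf("unexpected port definition %q", definition)
	}

	exposePort, err := strconv.Atoi(parts[0])
	if err != nil {
		return k8scorev1.ServicePort{}, err
	}

	destinationPort, err := strconv.Atoi(parts[1])
	if err != nil {
		return k8scorev1.ServicePort{}, err
	}

	return k8scorev1.ServicePort{
		Name:       fmt.Sprintf("port-%d-%s", destinationPort, strings.ToLower(proto)),
		Protocol:   k8scorev1.Protocol(strings.ToUpper(proto)),
		Port:       int32(destinationPort),
		TargetPort: intstr.IntOrString{IntVal: int32(exposePort)},
	}, nil
}

func main() {
	port, err := toServicePort("21022:22/tcp")
	if err != nil {
		panic(err)
	}

	// port-22-tcp TCP 22 -> 21022, matching the serviceexpose golden fixture
	fmt.Println(port.Name, port.Protocol, port.Port, port.TargetPort.IntVal)
}
```

Keeping the service Port as the in-container destination port means clients connect to the "real" protocol port on the LoadBalancer address, while TargetPort carries the host-side mapping that containerlab set up inside the launcher pod.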
+func (r *ServiceExposeReconciler) RenderAll( + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + owningTopologyStatus *clabernetesapistopologyv1alpha1.TopologyStatus, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + nodeNames []string, +) []*k8scorev1.Service { + services := make([]*k8scorev1.Service, len(nodeNames)) + + for idx, nodeName := range nodeNames { + services[idx] = r.Render( + owningTopology, + owningTopologyStatus, + clabernetesConfigs, + nodeName, + ) + } + + return services +} + +// Conforms checks if the existingService conforms with the renderedService. +func (r *ServiceExposeReconciler) Conforms( + existingService, + renderedService *k8scorev1.Service, + expectedOwnerUID apimachinerytypes.UID, +) bool { + return ServiceConforms(existingService, renderedService, expectedOwnerUID) +} diff --git a/controllers/topology/reconciler/serviceexpose_test.go b/controllers/topology/reconciler/serviceexpose_test.go new file mode 100644 index 00000000..2c385a6c --- /dev/null +++ b/controllers/topology/reconciler/serviceexpose_test.go @@ -0,0 +1,344 @@ +package reconciler_test + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + + clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" + claberneteslogging "github.com/srl-labs/clabernetes/logging" + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + clabernetesutil "github.com/srl-labs/clabernetes/util" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + k8scorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const renderServiceExposeTestName = "serviceexpose/render-service" + +func TestResolveServiceExpose(t *testing.T) { + cases := []struct { + name string + ownedServices *k8scorev1.ServiceList + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config + owningTopologyObject clabernetesapistopologyv1alpha1.TopologyCommonObject + expectedCurrent []string + expectedMissing []string + expectedExtra []*k8scorev1.Service + }{ + { + name: "simple", + ownedServices: &k8scorev1.ServiceList{}, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{}, + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-deployment-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + expectedCurrent: nil, + expectedMissing: nil, + expectedExtra: []*k8scorev1.Service{}, + }, + { + name: "missing-nodes", + ownedServices: &k8scorev1.ServiceList{}, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "node1": nil, + "node2": nil, + }, + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-servicefabric-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + 
name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + expectedCurrent: nil, + expectedMissing: []string{"node1", "node2"}, + expectedExtra: []*k8scorev1.Service{}, + }, + { + name: "extra-nodes", + ownedServices: &k8scorev1.ServiceList{ + Items: []k8scorev1.Service{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-servicefabric-test", + Namespace: "clabernetes", + Labels: map[string]string{ + clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeExpose, //nolint:lll + clabernetesconstants.LabelTopologyNode: "node2", + }, + }, + }, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "node1": nil, + }, + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-servicefabric-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + expectedCurrent: nil, + expectedMissing: nil, + expectedExtra: []*k8scorev1.Service{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-servicefabric-test", + Namespace: "clabernetes", + Labels: map[string]string{ + clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeExpose, //nolint:lll + clabernetesconstants.LabelTopologyNode: "node2", + }, + }, + }, + }, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewServiceExposeReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got, err := reconciler.Resolve( + testCase.ownedServices, + testCase.clabernetesConfigs, + testCase.owningTopologyObject, + ) + if err != nil { + t.Fatal(err) + } + + var gotCurrent []string + + for current := range got.Current { + gotCurrent = append(gotCurrent, current) + } + + if !clabernetesutil.StringSliceContainsAll(gotCurrent, testCase.expectedCurrent) { + clabernetestesthelper.FailOutput(t, gotCurrent, testCase.expectedCurrent) + } + + if !clabernetesutil.StringSliceContainsAll(got.Missing, testCase.expectedMissing) { + clabernetestesthelper.FailOutput(t, got.Missing, testCase.expectedMissing) + } + + if !reflect.DeepEqual(got.Extra, testCase.expectedExtra) { + clabernetestesthelper.FailOutput(t, got.Extra, testCase.expectedExtra) + } + }) + } +} + +func TestRenderServiceExpose(t *testing.T) { + cases := []struct { + name string + owningTopologyObject clabernetesapistopologyv1alpha1.TopologyCommonObject + owningTopologyStatus *clabernetesapistopologyv1alpha1.TopologyStatus + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config + nodeName string + }{ + { + name: "simple", + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-service-expose-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + owningTopologyStatus: &clabernetesapistopologyv1alpha1.TopologyStatus{ + Tunnels: 
map[string][]*clabernetesapistopologyv1alpha1.Tunnel{}, + NodeExposedPorts: map[string]*clabernetesapistopologyv1alpha1.ExposedPorts{}, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "srl1": { + Name: "srl1", + Prefix: clabernetesutil.ToPointer(""), + Topology: &clabernetesutilcontainerlab.Topology{ + Defaults: &clabernetesutilcontainerlab.NodeDefinition{ + Ports: []string{ + "21022:22/tcp", + "21023:23/tcp", + "21161:161/udp", + "33333:57400/tcp", + "60000:21/tcp", + "60001:80/tcp", + "60002:443/tcp", + "60003:830/tcp", + "60004:5000/tcp", + "60005:5900/tcp", + "60006:6030/tcp", + "60007:9339/tcp", + "60008:9340/tcp", + "60009:9559/tcp", + }, + }, + Kinds: nil, + Nodes: map[string]*clabernetesutilcontainerlab.NodeDefinition{ + "srl1": { + Kind: "srl", + Image: "ghcr.io/nokia/srlinux", + }, + }, + Links: nil, + }, + Debug: false, + }, + }, + nodeName: "srl1", + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewServiceExposeReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got := reconciler.Render( + testCase.owningTopologyObject, + testCase.owningTopologyStatus, + testCase.clabernetesConfigs, + testCase.nodeName, + ) + + if *clabernetestesthelper.Update { + clabernetestesthelper.WriteTestFixtureJSON( + t, + fmt.Sprintf( + "golden/%s/%s.json", + renderServiceExposeTestName, + testCase.name, + ), + got, + ) + + clabernetestesthelper.WriteTestFixtureJSON( + t, + fmt.Sprintf( + "golden/%s/%s-status.json", + renderServiceExposeTestName, + testCase.name, + ), + testCase.owningTopologyStatus, + ) + } + + var want k8scorev1.Service + + err := json.Unmarshal( + clabernetestesthelper.ReadTestFixtureFile( + t, + fmt.Sprintf( + "golden/%s/%s.json", + renderServiceExposeTestName, + testCase.name, + ), + ), + &want, + ) + if err != nil { + t.Fatal(err) + } + + var wantStatus *clabernetesapistopologyv1alpha1.TopologyStatus + + err = json.Unmarshal( + clabernetestesthelper.ReadTestFixtureFile( + t, + fmt.Sprintf( + "golden/%s/%s-status.json", + renderServiceExposeTestName, + testCase.name, + ), + ), + &wantStatus, + ) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(got.Annotations, want.Annotations) { + clabernetestesthelper.FailOutput(t, got.Annotations, want.Annotations) + } + if !reflect.DeepEqual(got.Labels, want.Labels) { + clabernetestesthelper.FailOutput(t, got.Labels, want.Labels) + } + if !reflect.DeepEqual(got.Spec, want.Spec) { + clabernetestesthelper.FailOutput(t, got.Spec, want.Spec) + } + + // also check that the status got updated properly + if !reflect.DeepEqual(testCase.owningTopologyStatus, wantStatus) { + clabernetestesthelper.FailOutput(t, got.Spec, want.Spec) + } + }) + } +} diff --git a/controllers/topology/reconciler/servicefabric.go b/controllers/topology/reconciler/servicefabric.go new file mode 100644 index 00000000..e024a8b3 --- /dev/null +++ b/controllers/topology/reconciler/servicefabric.go @@ -0,0 +1,198 @@ +package reconciler + +import ( + "fmt" + + clabernetesutil "github.com/srl-labs/clabernetes/util" + + claberneteslogging "github.com/srl-labs/clabernetes/logging" + + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + claberneteserrors 
"github.com/srl-labs/clabernetes/errors" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + k8scorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apimachinerytypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// NewServiceFabricReconciler returns an instance of ServiceFabricReconciler. +func NewServiceFabricReconciler( + log claberneteslogging.Instance, + owningTopologyKind string, + configManagerGetter clabernetesconfig.ManagerGetterFunc, +) *ServiceFabricReconciler { + return &ServiceFabricReconciler{ + log: log, + owningTopologyKind: owningTopologyKind, + configManagerGetter: configManagerGetter, + } +} + +// ServiceFabricReconciler is a subcomponent of the "TopologyReconciler" but is exposed for testing +// purposes. This is the component responsible for rendering/validating the "fabric" service for a +// clabernetes topology resource. +type ServiceFabricReconciler struct { + log claberneteslogging.Instance + owningTopologyKind string + configManagerGetter clabernetesconfig.ManagerGetterFunc +} + +// Resolve accepts a mapping of clabernetes configs and a list of services that are -- by owner +// reference and/or labels -- associated with the topology. It returns a ObjectDiffer object +// that contains the missing, extra, and current services for the topology. +func (r *ServiceFabricReconciler) Resolve( + ownedServices *k8scorev1.ServiceList, + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, + _ clabernetesapistopologyv1alpha1.TopologyCommonObject, +) (*clabernetesutil.ObjectDiffer[*k8scorev1.Service], error) { + services := &clabernetesutil.ObjectDiffer[*k8scorev1.Service]{ + Current: map[string]*k8scorev1.Service{}, + } + + for i := range ownedServices.Items { + labels := ownedServices.Items[i].Labels + + if labels == nil { + return nil, fmt.Errorf( + "%w: labels are nil, but we expect to see topology owner label here", + claberneteserrors.ErrInvalidData, + ) + } + + topologyServiceType := labels[clabernetesconstants.LabelTopologyServiceType] + + if topologyServiceType != clabernetesconstants.TopologyServiceTypeFabric { + // not the kind of service we're looking for here, we only care about the services + // used for connecting the nodes together here. 
+ continue + } + + nodeName, ok := labels[clabernetesconstants.LabelTopologyNode] + if !ok || nodeName == "" { + return nil, fmt.Errorf( + "%w: topology node label is missing or empty", + claberneteserrors.ErrInvalidData, + ) + } + + services.Current[nodeName] = &ownedServices.Items[i] + } + + allNodes := make([]string, len(clabernetesConfigs)) + + var nodeIdx int + + for nodeName := range clabernetesConfigs { + allNodes[nodeIdx] = nodeName + + nodeIdx++ + } + + services.SetMissing(allNodes) + services.SetExtra(allNodes) + + return services, nil +} + +func (r *ServiceFabricReconciler) renderServiceBase( + name, + namespace, + owningTopologyName, + nodeName string, +) *k8scorev1.Service { + annotations, globalLabels := r.configManagerGetter().GetAllMetadata() + + deploymentName := fmt.Sprintf("%s-%s", owningTopologyName, nodeName) + + selectorLabels := map[string]string{ + clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, + clabernetesconstants.LabelName: deploymentName, + clabernetesconstants.LabelTopologyOwner: owningTopologyName, + clabernetesconstants.LabelTopologyNode: nodeName, + } + + labels := map[string]string{ + clabernetesconstants.LabelTopologyKind: r.owningTopologyKind, + clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeFabric, //nolint:lll + + } + + for k, v := range selectorLabels { + labels[k] = v + } + + for k, v := range globalLabels { + labels[k] = v + } + + return &k8scorev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + Labels: labels, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Name: "vxlan", + Protocol: clabernetesconstants.UDP, + Port: clabernetesconstants.VXLANServicePort, + TargetPort: intstr.IntOrString{ + IntVal: clabernetesconstants.VXLANServicePort, + }, + }, + }, + Selector: selectorLabels, + Type: clabernetesconstants.KubernetesServiceClusterIPType, + }, + } +} + +// Render accepts the owning topology a mapping of clabernetes sub-topology configs and a node name +// and renders the final fabric service for this node. +func (r *ServiceFabricReconciler) Render( + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + nodeName string, +) *k8scorev1.Service { + owningTopologyName := owningTopology.GetName() + + service := r.renderServiceBase( + fmt.Sprintf("%s-%s-vx", owningTopologyName, nodeName), + owningTopology.GetNamespace(), + owningTopologyName, + nodeName, + ) + + return service +} + +// RenderAll accepts the owning topology a mapping of clabernetes sub-topology configs and a +// list of node names and renders the final fabric services for the given nodes. +func (r *ServiceFabricReconciler) RenderAll( + owningTopology clabernetesapistopologyv1alpha1.TopologyCommonObject, + nodeNames []string, +) []*k8scorev1.Service { + services := make([]*k8scorev1.Service, len(nodeNames)) + + for idx, nodeName := range nodeNames { + services[idx] = r.Render( + owningTopology, + nodeName, + ) + } + + return services +} + +// Conforms checks if the existingService conforms with the renderedService. 
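For reference, the fabric reconciler renders one small ClusterIP service per node, named "<topology>-<node>-vx", exposing only the VXLAN UDP port so launchers can reach each other. The sketch below shows that shape; the port value and literal label keys are taken from the servicefabric golden fixture rather than the clabernetesconstants identifiers, and annotations/extra labels are omitted.

```go
package main

import (
	"fmt"

	k8scorev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// vxlanPort matches the value in the servicefabric golden fixture; the real code
// uses clabernetesconstants.VXLANServicePort.
const vxlanPort = 14789

// fabricService sketches the ClusterIP service rendered for a node: a single UDP
// VXLAN port, selecting that node's deployment pods.
func fabricService(topologyName, namespace, nodeName string) *k8scorev1.Service {
	selector := map[string]string{
		"clabernetes/app":           "clabernetes",
		"clabernetes/name":          fmt.Sprintf("%s-%s", topologyName, nodeName),
		"clabernetes/topologyOwner": topologyName,
		"clabernetes/topologyNode":  nodeName,
	}

	return &k8scorev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-%s-vx", topologyName, nodeName),
			Namespace: namespace,
		},
		Spec: k8scorev1.ServiceSpec{
			Ports: []k8scorev1.ServicePort{
				{
					Name:       "vxlan",
					Protocol:   k8scorev1.ProtocolUDP,
					Port:       vxlanPort,
					TargetPort: intstr.IntOrString{IntVal: vxlanPort},
				},
			},
			Selector: selector,
			Type:     k8scorev1.ServiceTypeClusterIP,
		},
	}
}

func main() {
	svc := fabricService("render-service-fabric-test", "clabernetes", "srl1")

	// "render-service-fabric-test-srl1-vx" -- the per-node VXLAN endpoint
	fmt.Println(svc.Name, svc.Spec.Type)
}
```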
+func (r *ServiceFabricReconciler) Conforms( + existingService, + renderedService *k8scorev1.Service, + expectedOwnerUID apimachinerytypes.UID, +) bool { + return ServiceConforms(existingService, renderedService, expectedOwnerUID) +} diff --git a/controllers/topology/reconciler/servicefabric_test.go b/controllers/topology/reconciler/servicefabric_test.go new file mode 100644 index 00000000..25710c03 --- /dev/null +++ b/controllers/topology/reconciler/servicefabric_test.go @@ -0,0 +1,217 @@ +package reconciler_test + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + clabernetesutil "github.com/srl-labs/clabernetes/util" + clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" + + clabernetesapistopology "github.com/srl-labs/clabernetes/apis/topology" + clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetescontrollerstopologyreconciler "github.com/srl-labs/clabernetes/controllers/topology/reconciler" + claberneteslogging "github.com/srl-labs/clabernetes/logging" + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + k8scorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const renderServiceFabricTestName = "servicefabric/render-service" + +func TestResolveServiceFabric(t *testing.T) { + cases := []struct { + name string + ownedServices *k8scorev1.ServiceList + clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config + expectedCurrent []string + expectedMissing []string + expectedExtra []*k8scorev1.Service + }{ + { + name: "simple", + ownedServices: &k8scorev1.ServiceList{}, + clabernetesConfigs: nil, + expectedCurrent: nil, + expectedMissing: nil, + expectedExtra: []*k8scorev1.Service{}, + }, + { + name: "missing-nodes", + ownedServices: &k8scorev1.ServiceList{}, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "node1": nil, + "node2": nil, + }, + expectedCurrent: nil, + expectedMissing: []string{"node1", "node2"}, + expectedExtra: []*k8scorev1.Service{}, + }, + { + name: "extra-nodes", + ownedServices: &k8scorev1.ServiceList{ + Items: []k8scorev1.Service{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-servicefabric-test", + Namespace: "clabernetes", + Labels: map[string]string{ + clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeFabric, //nolint:lll + clabernetesconstants.LabelTopologyNode: "node2", + }, + }, + }, + }, + }, + clabernetesConfigs: map[string]*clabernetesutilcontainerlab.Config{ + "node1": nil, + }, + expectedCurrent: nil, + expectedMissing: nil, + expectedExtra: []*k8scorev1.Service{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolve-servicefabric-test", + Namespace: "clabernetes", + Labels: map[string]string{ + clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeFabric, //nolint:lll + clabernetesconstants.LabelTopologyNode: "node2", + }, + }, + }, + }, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewServiceFabricReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got, err := reconciler.Resolve( + testCase.ownedServices, + testCase.clabernetesConfigs, + nil, + ) + if err != nil { + 
t.Fatal(err) + } + + var gotCurrent []string + + for current := range got.Current { + gotCurrent = append(gotCurrent, current) + } + + if !clabernetesutil.StringSliceContainsAll(gotCurrent, testCase.expectedCurrent) { + clabernetestesthelper.FailOutput(t, gotCurrent, testCase.expectedCurrent) + } + + if !clabernetesutil.StringSliceContainsAll(got.Missing, testCase.expectedMissing) { + clabernetestesthelper.FailOutput(t, got.Missing, testCase.expectedMissing) + } + + if !reflect.DeepEqual(got.Extra, testCase.expectedExtra) { + clabernetestesthelper.FailOutput(t, got.Extra, testCase.expectedExtra) + } + }) + } +} + +func TestRenderServiceFabric(t *testing.T) { + cases := []struct { + name string + owningTopologyObject clabernetesapistopologyv1alpha1.TopologyCommonObject + nodeName string + }{ + { + name: "simple", + owningTopologyObject: &clabernetesapistopologyv1alpha1.Containerlab{ + ObjectMeta: metav1.ObjectMeta{ + Name: "render-service-fabric-test", + Namespace: "clabernetes", + }, + Spec: clabernetesapistopologyv1alpha1.ContainerlabSpec{ + TopologyCommonSpec: clabernetesapistopologyv1alpha1.TopologyCommonSpec{}, + Config: `--- + name: test + topology: + nodes: + srl1: + kind: srl + image: ghcr.io/nokia/srlinux +`, + }, + }, + nodeName: "srl1", + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + reconciler := clabernetescontrollerstopologyreconciler.NewServiceFabricReconciler( + &claberneteslogging.FakeInstance{}, + clabernetesapistopology.Containerlab, + clabernetesconfig.GetFakeManager, + ) + + got := reconciler.Render( + testCase.owningTopologyObject, + testCase.nodeName, + ) + + if *clabernetestesthelper.Update { + clabernetestesthelper.WriteTestFixtureJSON( + t, + fmt.Sprintf( + "golden/%s/%s.json", + renderServiceFabricTestName, + testCase.name, + ), + got, + ) + } + + var want k8scorev1.Service + + err := json.Unmarshal( + clabernetestesthelper.ReadTestFixtureFile( + t, + fmt.Sprintf( + "golden/%s/%s.json", + renderServiceFabricTestName, + testCase.name, + ), + ), + &want, + ) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(got.Annotations, want.Annotations) { + clabernetestesthelper.FailOutput(t, got.Annotations, want.Annotations) + } + if !reflect.DeepEqual(got.Labels, want.Labels) { + clabernetestesthelper.FailOutput(t, got.Labels, want.Labels) + } + if !reflect.DeepEqual(got.Spec, want.Spec) { + clabernetestesthelper.FailOutput(t, got.Spec, want.Spec) + } + }) + } +} diff --git a/controllers/topology/test-fixtures/golden/configmap/render-config-map/basic-two-node-no-links.json b/controllers/topology/reconciler/test-fixtures/golden/configmap/render-config-map/basic-two-node-no-links.json similarity index 100% rename from controllers/topology/test-fixtures/golden/configmap/render-config-map/basic-two-node-no-links.json rename to controllers/topology/reconciler/test-fixtures/golden/configmap/render-config-map/basic-two-node-no-links.json diff --git a/controllers/topology/test-fixtures/golden/configmap/render-config-map/basic-two-node-with-links.json b/controllers/topology/reconciler/test-fixtures/golden/configmap/render-config-map/basic-two-node-with-links.json similarity index 100% rename from controllers/topology/test-fixtures/golden/configmap/render-config-map/basic-two-node-with-links.json rename to controllers/topology/reconciler/test-fixtures/golden/configmap/render-config-map/basic-two-node-with-links.json diff --git 
a/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/containerlab-debug.json b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/containerlab-debug.json new file mode 100755 index 00000000..68960e9f --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/containerlab-debug.json @@ -0,0 +1,102 @@ +{ + "metadata": { + "name": "render-deployment-test-srl1", + "namespace": "clabernetes", + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "volumes": [ + { + "name": "render-deployment-test-config", + "configMap": { + "name": "render-deployment-test" + } + } + ], + "containers": [ + { + "name": "srl1", + "image": "ghcr.io/srl-labs/clabernetes/clabernetes-launcher:latest", + "command": [ + "/clabernetes/manager", + "launch" + ], + "workingDir": "/clabernetes", + "ports": [ + { + "name": "vxlan", + "containerPort": 14789, + "protocol": "UDP" + } + ], + "env": [ + { + "name": "LAUNCHER_LOGGER_LEVEL", + "value": "info" + }, + { + "name": "LAUNCHER_CONTAINERLAB_DEBUG", + "value": "true" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/topo.clab.yaml", + "subPath": "srl1" + }, + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/tunnels.yaml", + "subPath": "srl1-tunnels" + } + ], + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "imagePullPolicy": "IfNotPresent", + "securityContext": { + "privileged": true, + "runAsUser": 0 + } + } + ], + "restartPolicy": "Always", + "serviceAccountName": "default" + } + }, + "strategy": {}, + "revisionHistoryLimit": 0 + }, + "status": {} +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/insecure-registries.json b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/insecure-registries.json new file mode 100755 index 00000000..55840dcb --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/insecure-registries.json @@ -0,0 +1,102 @@ +{ + "metadata": { + "name": "render-deployment-test-srl1", + "namespace": "clabernetes", + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": 
"render-deployment-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "volumes": [ + { + "name": "render-deployment-test-config", + "configMap": { + "name": "render-deployment-test" + } + } + ], + "containers": [ + { + "name": "srl1", + "image": "ghcr.io/srl-labs/clabernetes/clabernetes-launcher:latest", + "command": [ + "/clabernetes/manager", + "launch" + ], + "workingDir": "/clabernetes", + "ports": [ + { + "name": "vxlan", + "containerPort": 14789, + "protocol": "UDP" + } + ], + "env": [ + { + "name": "LAUNCHER_LOGGER_LEVEL", + "value": "info" + }, + { + "name": "LAUNCHER_INSECURE_REGISTRIES", + "value": "1.2.3.4,potato.com" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/topo.clab.yaml", + "subPath": "srl1" + }, + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/tunnels.yaml", + "subPath": "srl1-tunnels" + } + ], + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "imagePullPolicy": "IfNotPresent", + "securityContext": { + "privileged": true, + "runAsUser": 0 + } + } + ], + "restartPolicy": "Always", + "serviceAccountName": "default" + } + }, + "strategy": {}, + "revisionHistoryLimit": 0 + }, + "status": {} +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/launcher-log-level.json b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/launcher-log-level.json new file mode 100755 index 00000000..07eba9a8 --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/launcher-log-level.json @@ -0,0 +1,98 @@ +{ + "metadata": { + "name": "render-deployment-test-srl1", + "namespace": "clabernetes", + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "volumes": [ + { + "name": "render-deployment-test-config", + "configMap": { + "name": "render-deployment-test" + } + } + ], + "containers": [ + { + "name": "srl1", + "image": "ghcr.io/srl-labs/clabernetes/clabernetes-launcher:latest", + "command": [ + "/clabernetes/manager", + "launch" + ], + "workingDir": "/clabernetes", + "ports": [ + { + "name": "vxlan", + "containerPort": 14789, + "protocol": "UDP" + } + ], + "env": [ + { + "name": 
"LAUNCHER_LOGGER_LEVEL", + "value": "debug" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/topo.clab.yaml", + "subPath": "srl1" + }, + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/tunnels.yaml", + "subPath": "srl1-tunnels" + } + ], + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "imagePullPolicy": "IfNotPresent", + "securityContext": { + "privileged": true, + "runAsUser": 0 + } + } + ], + "restartPolicy": "Always", + "serviceAccountName": "default" + } + }, + "strategy": {}, + "revisionHistoryLimit": 0 + }, + "status": {} +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/simple.json b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/simple.json new file mode 100755 index 00000000..f8f9ccb1 --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/deployment/render-deployment/simple.json @@ -0,0 +1,98 @@ +{ + "metadata": { + "name": "render-deployment-test-srl1", + "namespace": "clabernetes", + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-deployment-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-deployment-test" + } + }, + "spec": { + "volumes": [ + { + "name": "render-deployment-test-config", + "configMap": { + "name": "render-deployment-test" + } + } + ], + "containers": [ + { + "name": "srl1", + "image": "ghcr.io/srl-labs/clabernetes/clabernetes-launcher:latest", + "command": [ + "/clabernetes/manager", + "launch" + ], + "workingDir": "/clabernetes", + "ports": [ + { + "name": "vxlan", + "containerPort": 14789, + "protocol": "UDP" + } + ], + "env": [ + { + "name": "LAUNCHER_LOGGER_LEVEL", + "value": "info" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/topo.clab.yaml", + "subPath": "srl1" + }, + { + "name": "render-deployment-test-config", + "readOnly": true, + "mountPath": "/clabernetes/tunnels.yaml", + "subPath": "srl1-tunnels" + } + ], + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "imagePullPolicy": "IfNotPresent", + "securityContext": { + "privileged": true, + "runAsUser": 0 + } + } + ], + "restartPolicy": "Always", + "serviceAccountName": "default" + } + }, + "strategy": {}, + "revisionHistoryLimit": 0 + }, + "status": {} +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple-status.json b/controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple-status.json new file mode 100755 index 00000000..fb411a8e --- /dev/null +++ 
b/controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple-status.json @@ -0,0 +1,30 @@ +{ + "configs": "", + "configsHash": "", + "tunnels": {}, + "tunnelsHash": "", + "nodeExposedPorts": { + "srl1": { + "loadBalancerAddress": "", + "tcpPorts": [ + 22, + 23, + 57400, + 21, + 80, + 443, + 830, + 5000, + 5900, + 6030, + 9339, + 9340, + 9559 + ], + "udpPorts": [ + 161 + ] + } + }, + "nodeExposedPortsHash": "" +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple.json b/controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple.json new file mode 100755 index 00000000..417f7587 --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/serviceexpose/render-service/simple.json @@ -0,0 +1,113 @@ +{ + "metadata": { + "name": "render-service-expose-test-srl1", + "namespace": "clabernetes", + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-service-expose-test-srl1", + "clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-service-expose-test", + "clabernetes/topologyServiceType": "expose" + } + }, + "spec": { + "ports": [ + { + "name": "port-22-tcp", + "protocol": "TCP", + "port": 22, + "targetPort": 21022 + }, + { + "name": "port-23-tcp", + "protocol": "TCP", + "port": 23, + "targetPort": 21023 + }, + { + "name": "port-161-udp", + "protocol": "UDP", + "port": 161, + "targetPort": 21161 + }, + { + "name": "port-57400-tcp", + "protocol": "TCP", + "port": 57400, + "targetPort": 33333 + }, + { + "name": "port-21-tcp", + "protocol": "TCP", + "port": 21, + "targetPort": 60000 + }, + { + "name": "port-80-tcp", + "protocol": "TCP", + "port": 80, + "targetPort": 60001 + }, + { + "name": "port-443-tcp", + "protocol": "TCP", + "port": 443, + "targetPort": 60002 + }, + { + "name": "port-830-tcp", + "protocol": "TCP", + "port": 830, + "targetPort": 60003 + }, + { + "name": "port-5000-tcp", + "protocol": "TCP", + "port": 5000, + "targetPort": 60004 + }, + { + "name": "port-5900-tcp", + "protocol": "TCP", + "port": 5900, + "targetPort": 60005 + }, + { + "name": "port-6030-tcp", + "protocol": "TCP", + "port": 6030, + "targetPort": 60006 + }, + { + "name": "port-9339-tcp", + "protocol": "TCP", + "port": 9339, + "targetPort": 60007 + }, + { + "name": "port-9340-tcp", + "protocol": "TCP", + "port": 9340, + "targetPort": 60008 + }, + { + "name": "port-9559-tcp", + "protocol": "TCP", + "port": 9559, + "targetPort": 60009 + } + ], + "selector": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-service-expose-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-service-expose-test" + }, + "type": "LoadBalancer" + }, + "status": { + "loadBalancer": {} + } +} \ No newline at end of file diff --git a/controllers/topology/reconciler/test-fixtures/golden/servicefabric/render-service/simple.json b/controllers/topology/reconciler/test-fixtures/golden/servicefabric/render-service/simple.json new file mode 100755 index 00000000..9b97b36c --- /dev/null +++ b/controllers/topology/reconciler/test-fixtures/golden/servicefabric/render-service/simple.json @@ -0,0 +1,35 @@ +{ + "metadata": { + "name": "render-service-fabric-test-srl1-vx", + "namespace": "clabernetes", + "creationTimestamp": null, + "labels": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-service-fabric-test-srl1", + 
"clabernetes/topologyKind": "containerlab", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-service-fabric-test", + "clabernetes/topologyServiceType": "fabric" + } + }, + "spec": { + "ports": [ + { + "name": "vxlan", + "protocol": "UDP", + "port": 14789, + "targetPort": 14789 + } + ], + "selector": { + "clabernetes/app": "clabernetes", + "clabernetes/name": "render-service-fabric-test-srl1", + "clabernetes/topologyNode": "srl1", + "clabernetes/topologyOwner": "render-service-fabric-test" + }, + "type": "ClusterIP" + }, + "status": { + "loadBalancer": {} + } +} \ No newline at end of file diff --git a/controllers/topology/serviceexpose.go b/controllers/topology/serviceexpose.go deleted file mode 100644 index 54bafbf1..00000000 --- a/controllers/topology/serviceexpose.go +++ /dev/null @@ -1,359 +0,0 @@ -package topology - -import ( - "context" - "fmt" - "sort" - "strings" - - clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - - clabernetesapistopologyv1alpha1 "github.com/srl-labs/clabernetes/apis/topology/v1alpha1" - clabernetesconstants "github.com/srl-labs/clabernetes/constants" - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" - claberneteserrors "github.com/srl-labs/clabernetes/errors" - clabernetesutil "github.com/srl-labs/clabernetes/util" - k8scorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -func (r *Reconciler) resolveExposeServices( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, -) (*clabernetescontrollers.ResolvedServices, error) { - ownedServices := &k8scorev1.ServiceList{} - - err := r.Client.List( - ctx, - ownedServices, - ctrlruntimeclient.InNamespace(obj.GetNamespace()), - ctrlruntimeclient.MatchingLabels{ - clabernetesconstants.LabelTopologyOwner: obj.GetName(), - }, - ) - if err != nil { - r.Log.Criticalf("failed fetching owned expose services, error: '%s'", err) - - return nil, err - } - - services := &clabernetescontrollers.ResolvedServices{ - Current: map[string]*k8scorev1.Service{}, - } - - for i := range ownedServices.Items { - labels := ownedServices.Items[i].Labels - - if labels == nil { - return nil, fmt.Errorf( - "%w: labels are nil, but we expect to see topology owner label here", - claberneteserrors.ErrInvalidData, - ) - } - - topologyServiceType := labels[clabernetesconstants.LabelTopologyServiceType] - - if topologyServiceType != clabernetesconstants.TopologyServiceTypeExpose { - // not the kind of service we're looking for here, we only care about the services - // used for exposing nodes here. - continue - } - - nodeName, ok := labels[clabernetesconstants.LabelTopologyNode] - if !ok || nodeName == "" { - return nil, fmt.Errorf( - "%w: topology node label is missing or empty", - claberneteserrors.ErrInvalidData, - ) - } - - services.Current[nodeName] = &ownedServices.Items[i] - } - - commonTopologySpec := obj.GetTopologyCommonSpec() - - exposedNodes := make([]string, 0) - - for nodeName, nodeData := range clabernetesConfigs { - // if disable auto expose is true *and* there are no ports defined for the node *and* - // there are no default ports defined for the topology we can skip the node from an expose - // perspective. 
- if commonTopologySpec.DisableAutoExpose && - len(nodeData.Topology.Nodes[nodeName].Ports) == 0 && - len(nodeData.Topology.Defaults.Ports) == 0 { - continue - } - - exposedNodes = append(exposedNodes, nodeName) - } - - services.Missing = clabernetesutil.StringSliceDifference( - services.CurrentServiceNames(), - exposedNodes, - ) - - r.Log.Debugf( - "expose services are missing for the following nodes: %s", - services.Missing, - ) - - extraEndpointDeployments := clabernetesutil.StringSliceDifference( - exposedNodes, - services.CurrentServiceNames(), - ) - - r.Log.Debugf( - "extraneous expose services exist for following nodes: %s", - extraEndpointDeployments, - ) - - services.Extra = make([]*k8scorev1.Service, len(extraEndpointDeployments)) - - for idx, endpoint := range extraEndpointDeployments { - services.Extra[idx] = services.Current[endpoint] - } - - return services, nil -} - -func (r *Reconciler) pruneExposeServices( - ctx context.Context, - services *clabernetescontrollers.ResolvedServices, -) error { - r.Log.Info("pruning extraneous expose services") - - for _, extraDeployment := range services.Extra { - r.Log.Debugf( - "removing expose service '%s/%s'", - extraDeployment.Namespace, - extraDeployment.Name, - ) - - err := r.Client.Delete(ctx, extraDeployment) - if err != nil { - r.Log.Criticalf( - "failed removing expose service '%s/%s' error: %s", - extraDeployment.Namespace, - extraDeployment.Name, - err, - ) - - return err - } - } - - return nil -} - -func (r *Reconciler) enforceExposeServices( - ctx context.Context, - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - objTopologyStatus *clabernetesapistopologyv1alpha1.TopologyStatus, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - services *clabernetescontrollers.ResolvedServices, -) error { - r.Log.Info("creating missing expose services") - - for _, nodeName := range services.Missing { - service := r.renderExposeService( - obj, - objTopologyStatus, - clabernetesConfigs, - nodeName, - ) - - err := ctrlruntimeutil.SetOwnerReference(obj, service, r.Client.Scheme()) - if err != nil { - return err - } - - r.Log.Debugf( - "creating expose service '%s/%s'", - service.Namespace, - service.Name, - ) - - err = r.Client.Create(ctx, service) - if err != nil { - r.Log.Criticalf( - "failed creating expose service '%s/%s' error: %s", - service.Namespace, - service.Name, - err, - ) - - return err - } - } - - // compare and update existing deployments if we need to - r.Log.Info("enforcing desired state on existing expose services") - - for nodeName, service := range services.Current { - r.Log.Debugf( - "comparing existing expose service '%s/%s' to desired state", - service.Namespace, - service.Name, - ) - - expectedService := r.renderExposeService( - obj, - objTopologyStatus, - clabernetesConfigs, - nodeName, - ) - - if len(service.Status.LoadBalancer.Ingress) == 1 { - // can/would this ever be more than 1? i dunno? 
- address := service.Status.LoadBalancer.Ingress[0].IP - if address != "" { - objTopologyStatus.NodeExposedPorts[nodeName].LoadBalancerAddress = address - } - } - - err := ctrlruntimeutil.SetOwnerReference(obj, expectedService, r.Client.Scheme()) - if err != nil { - return err - } - - if !ServiceConforms(service, expectedService, obj.GetUID()) { - r.Log.Debugf( - "comparing existing expose service '%s/%s' spec does not conform to desired "+ - "state, updating", - service.Namespace, - service.Name, - ) - - err = r.Client.Update(ctx, expectedService) - if err != nil { - r.Log.Criticalf( - "failed updating expose service '%s/%s' error: %s", - expectedService.Namespace, - expectedService.Name, - err, - ) - - return err - } - } - } - - return nil -} - -func (r *Reconciler) parseContainerlabTopologyPortsSection( - portDefinition string, -) (bool, *k8scorev1.ServicePort) { - typedPort, err := clabernetesutilcontainerlab.ProcessPortDefinition(portDefinition) - if err != nil { - r.Log.Warnf("skipping port due to the following error: %s", err) - - return true, nil - } - - return false, &k8scorev1.ServicePort{ - Name: fmt.Sprintf( - "port-%d-%s", typedPort.DestinationPort, strings.ToLower(typedPort.Protocol), - ), - Protocol: k8scorev1.Protocol(typedPort.Protocol), - Port: int32(typedPort.DestinationPort), - TargetPort: intstr.IntOrString{ - IntVal: int32(typedPort.ExposePort), - }, - } -} - -func (r *Reconciler) renderExposeService( - obj clabernetesapistopologyv1alpha1.TopologyCommonObject, - objTopologyStatus *clabernetesapistopologyv1alpha1.TopologyStatus, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, - nodeName string, -) *k8scorev1.Service { - configManager := r.ConfigManagerGetter() - globalAnnotations, globalLabels := configManager.GetAllMetadata() - - name := obj.GetName() - - objTopologyStatus.NodeExposedPorts[nodeName] = &clabernetesapistopologyv1alpha1.ExposedPorts{ - TCPPorts: make([]int, 0), - UDPPorts: make([]int, 0), - } - - serviceName := fmt.Sprintf("%s-%s", name, nodeName) - - labels := map[string]string{ - clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, - clabernetesconstants.LabelName: serviceName, - clabernetesconstants.LabelTopologyOwner: name, - clabernetesconstants.LabelTopologyNode: nodeName, - clabernetesconstants.LabelTopologyKind: r.ResourceKind, - clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeExpose, //nolint:lll - } - - for k, v := range globalLabels { - labels[k] = v - } - - ports := make([]k8scorev1.ServicePort, 0) - - // for actual containerlab configs we copy the users given defaults into each "sub topology" -- - // so in the case of containerlab we want to make sure we also iterate over the "default" or - // topology wide ports that were specified. in this process we dont want to duplicate things, so - // we use a simple set implementation to make sure we aren't doubling up on any port - // definitions. 
- allContainerlabPorts := clabernetesutil.NewStringSet() - - allContainerlabPorts.Extend(clabernetesConfigs[nodeName].Topology.Nodes[nodeName].Ports) - - allContainerlabPorts.Extend(clabernetesConfigs[nodeName].Topology.Defaults.Ports) - - allContainerlabPortsItems := allContainerlabPorts.Items() - sort.Strings(allContainerlabPortsItems) - - for _, portDefinition := range allContainerlabPortsItems { - shouldSkip, port := r.parseContainerlabTopologyPortsSection(portDefinition) - - if shouldSkip { - continue - } - - ports = append(ports, *port) - - // dont forget to update the exposed ports status bits - if port.Protocol == clabernetesconstants.TCP { - objTopologyStatus.NodeExposedPorts[nodeName].TCPPorts = append( - objTopologyStatus.NodeExposedPorts[nodeName].TCPPorts, - int(port.Port), - ) - } else { - objTopologyStatus.NodeExposedPorts[nodeName].UDPPorts = append( - objTopologyStatus.NodeExposedPorts[nodeName].UDPPorts, - int(port.Port), - ) - } - } - - service := &k8scorev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: obj.GetNamespace(), - Annotations: globalAnnotations, - Labels: labels, - }, - Spec: k8scorev1.ServiceSpec{ - Ports: ports, - Selector: map[string]string{ - clabernetesconstants.LabelTopologyOwner: name, - clabernetesconstants.LabelTopologyNode: nodeName, - }, - Type: "LoadBalancer", - }, - } - - return service -} diff --git a/controllers/topology/servicefabric.go b/controllers/topology/servicefabric.go deleted file mode 100644 index 3fd77d68..00000000 --- a/controllers/topology/servicefabric.go +++ /dev/null @@ -1,267 +0,0 @@ -package topology - -import ( - "context" - "fmt" - - clabernetesutilcontainerlab "github.com/srl-labs/clabernetes/util/containerlab" - - clabernetesconstants "github.com/srl-labs/clabernetes/constants" - clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" - claberneteserrors "github.com/srl-labs/clabernetes/errors" - clabernetesutil "github.com/srl-labs/clabernetes/util" - k8scorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -func (r *Reconciler) resolveFabricServices( - ctx context.Context, - obj ctrlruntimeclient.Object, - clabernetesConfigs map[string]*clabernetesutilcontainerlab.Config, -) (*clabernetescontrollers.ResolvedServices, error) { - ownedServices := &k8scorev1.ServiceList{} - - err := r.Client.List( - ctx, - ownedServices, - ctrlruntimeclient.InNamespace(obj.GetNamespace()), - ctrlruntimeclient.MatchingLabels{ - clabernetesconstants.LabelTopologyOwner: obj.GetName(), - }, - ) - if err != nil { - r.Log.Criticalf("failed fetching owned services, error: '%s'", err) - - return nil, err - } - - services := &clabernetescontrollers.ResolvedServices{ - Current: map[string]*k8scorev1.Service{}, - } - - for i := range ownedServices.Items { - labels := ownedServices.Items[i].Labels - - if labels == nil { - return nil, fmt.Errorf( - "%w: labels are nil, but we expect to see topology owner label here", - claberneteserrors.ErrInvalidData, - ) - } - - topologyServiceType := labels[clabernetesconstants.LabelTopologyServiceType] - - if topologyServiceType != clabernetesconstants.TopologyServiceTypeFabric { - // not the kind of service we're looking for here, we only care about the services - // used for connecting the nodes together here. 
- continue - } - - nodeName, ok := labels[clabernetesconstants.LabelTopologyNode] - if !ok || nodeName == "" { - return nil, fmt.Errorf( - "%w: topology node label is missing or empty", - claberneteserrors.ErrInvalidData, - ) - } - - services.Current[nodeName] = &ownedServices.Items[i] - } - - allNodes := make([]string, len(clabernetesConfigs)) - - var nodeIdx int - - for nodeName := range clabernetesConfigs { - allNodes[nodeIdx] = nodeName - - nodeIdx++ - } - - services.Missing = clabernetesutil.StringSliceDifference( - services.CurrentServiceNames(), - allNodes, - ) - - r.Log.Debugf( - "services are missing for the following nodes: %s", - services.Missing, - ) - - extraEndpointDeployments := clabernetesutil.StringSliceDifference( - allNodes, - services.CurrentServiceNames(), - ) - - r.Log.Debugf( - "extraneous services exist for following nodes: %s", - extraEndpointDeployments, - ) - - services.Extra = make([]*k8scorev1.Service, len(extraEndpointDeployments)) - - for idx, endpoint := range extraEndpointDeployments { - services.Extra[idx] = services.Current[endpoint] - } - - return services, nil -} - -func (r *Reconciler) pruneFabricServices( - ctx context.Context, - services *clabernetescontrollers.ResolvedServices, -) error { - r.Log.Info("pruning extraneous services") - - for _, extraDeployment := range services.Extra { - r.Log.Debugf( - "removing service '%s/%s'", - extraDeployment.Namespace, - extraDeployment.Name, - ) - - err := r.Client.Delete(ctx, extraDeployment) - if err != nil { - r.Log.Criticalf( - "failed removing service '%s/%s' error: %s", - extraDeployment.Namespace, - extraDeployment.Name, - err, - ) - - return err - } - } - - return nil -} - -func (r *Reconciler) enforceFabricServices( - ctx context.Context, - obj ctrlruntimeclient.Object, - services *clabernetescontrollers.ResolvedServices, -) error { - r.Log.Info("creating missing services") - - for _, nodeName := range services.Missing { - service := r.renderFabricService( - obj, - nodeName, - ) - - err := ctrlruntimeutil.SetOwnerReference(obj, service, r.Client.Scheme()) - if err != nil { - return err - } - - r.Log.Debugf( - "creating service '%s/%s'", - service.Namespace, - service.Name, - ) - - err = r.Client.Create(ctx, service) - if err != nil { - r.Log.Criticalf( - "failed creating service '%s/%s' error: %s", - service.Namespace, - service.Name, - err, - ) - - return err - } - } - - // compare and update existing deployments if we need to - r.Log.Info("enforcing desired state on existing services") - - for nodeName, service := range services.Current { - r.Log.Debugf( - "comparing existing service '%s/%s' to desired state", - service.Namespace, - service.Name, - ) - - expectedService := r.renderFabricService( - obj, - nodeName, - ) - - err := ctrlruntimeutil.SetOwnerReference(obj, service, r.Client.Scheme()) - if err != nil { - return err - } - - if !ServiceConforms(service, expectedService, obj.GetUID()) { - r.Log.Debugf( - "comparing existing service '%s/%s' spec does not conform to desired state, "+ - "updating", - service.Namespace, - service.Name, - ) - - err = r.Client.Update(ctx, expectedService) - if err != nil { - r.Log.Criticalf( - "failed updating service '%s/%s' error: %s", - expectedService.Namespace, - expectedService.Name, - err, - ) - - return err - } - } - } - - return nil -} - -func (r *Reconciler) renderFabricService( - obj ctrlruntimeclient.Object, - nodeName string, -) *k8scorev1.Service { - name := obj.GetName() - - serviceName := fmt.Sprintf("%s-%s-vx", name, nodeName) - - labels := 
map[string]string{ - clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, - clabernetesconstants.LabelName: serviceName, - clabernetesconstants.LabelTopologyOwner: name, - clabernetesconstants.LabelTopologyNode: nodeName, - clabernetesconstants.LabelTopologyKind: r.ResourceKind, - clabernetesconstants.LabelTopologyServiceType: clabernetesconstants.TopologyServiceTypeFabric, //nolint:lll - } - - service := &k8scorev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: obj.GetNamespace(), - Labels: labels, - }, - Spec: k8scorev1.ServiceSpec{ - Ports: []k8scorev1.ServicePort{ - { - Name: "vxlan", - Protocol: "UDP", - Port: clabernetesconstants.VXLANServicePort, - TargetPort: intstr.IntOrString{ - IntVal: clabernetesconstants.VXLANServicePort, - }, - }, - }, - Selector: map[string]string{ - clabernetesconstants.LabelTopologyOwner: name, - clabernetesconstants.LabelTopologyNode: nodeName, - }, - Type: "ClusterIP", - }, - } - - return service -} diff --git a/e2e/topology/containerlab/basic/test-fixtures/golden/10-service.containerlab-basic-srl1.yaml b/e2e/topology/containerlab/basic/test-fixtures/golden/10-service.containerlab-basic-srl1.yaml index 0d40032f..e91fedc5 100644 --- a/e2e/topology/containerlab/basic/test-fixtures/golden/10-service.containerlab-basic-srl1.yaml +++ b/e2e/topology/containerlab/basic/test-fixtures/golden/10-service.containerlab-basic-srl1.yaml @@ -79,6 +79,8 @@ spec: protocol: TCP targetPort: 60012 selector: + clabernetes/app: clabernetes + clabernetes/name: containerlab-basic-srl1 clabernetes/topologyNode: srl1 clabernetes/topologyOwner: containerlab-basic sessionAffinity: None diff --git a/logging/fake.go b/logging/fake.go new file mode 100644 index 00000000..cb732da5 --- /dev/null +++ b/logging/fake.go @@ -0,0 +1,38 @@ +package logging + +var _ Instance = (*FakeInstance)(nil) + +// FakeInstance is a fake logging instance that does nothing. 
+type FakeInstance struct{} + +func (i *FakeInstance) Debug(f string) {} + +func (i *FakeInstance) Debugf(f string, a ...interface{}) {} + +func (i *FakeInstance) Info(f string) {} + +func (i *FakeInstance) Infof(f string, a ...interface{}) {} + +func (i *FakeInstance) Warn(f string) {} + +func (i *FakeInstance) Warnf(f string, a ...interface{}) {} + +func (i *FakeInstance) Critical(f string) {} + +func (i *FakeInstance) Criticalf(f string, a ...interface{}) {} + +func (i *FakeInstance) Fatal(f string) {} + +func (i *FakeInstance) Fatalf(f string, a ...interface{}) {} + +func (i *FakeInstance) Write(p []byte) (n int, err error) { + return 0, nil +} + +func (i *FakeInstance) GetName() string { + return "" +} + +func (i *FakeInstance) GetLevel() string { + return "" +} diff --git a/manager/clabernetes.go b/manager/clabernetes.go index 61bbdb54..2dcb2701 100644 --- a/manager/clabernetes.go +++ b/manager/clabernetes.go @@ -6,6 +6,8 @@ import ( "os" "time" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" clabernetesconstants "github.com/srl-labs/clabernetes/constants" @@ -160,7 +162,7 @@ func (c *clabernetes) startup() { var err error - c.namespace, err = clabernetesutil.CurrentNamespace() + c.namespace, err = clabernetesutilkubernetes.CurrentNamespace() if err != nil { c.logger.Criticalf("failed getting current namespace, err: %s", err) diff --git a/manager/election/util.go b/manager/election/util.go index 538bd164..e9ab6d31 100644 --- a/manager/election/util.go +++ b/manager/election/util.go @@ -3,6 +3,8 @@ package election import ( "os" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + clabernetesutil "github.com/srl-labs/clabernetes/util" ) @@ -16,14 +18,14 @@ const ( func GenerateLeaderIdentity() string { hostname, err := os.Hostname() if err == nil { - return clabernetesutil.SafeConcatNameKubernetes( + return clabernetesutilkubernetes.SafeConcatNameKubernetes( "clabernetes", hostname, clabernetesutil.RandomString(unknownHostnameRandomNameLen), ) } - return clabernetesutil.SafeConcatNameKubernetes( + return clabernetesutilkubernetes.SafeConcatNameKubernetes( "clabernetes", clabernetesutil.RandomString(unknownHostnameRandomNameLen), ) diff --git a/testhelper/kubernetes.go b/testhelper/kubernetes.go index 68a9c243..4ca307b5 100644 --- a/testhelper/kubernetes.go +++ b/testhelper/kubernetes.go @@ -3,6 +3,8 @@ package testhelper import ( "testing" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + clabernetesutil "github.com/srl-labs/clabernetes/util" ) @@ -48,7 +50,7 @@ func NormalizeKubernetesObject(t *testing.T, object []byte) []byte { // NewTestNamespace generates a namespace for a test. func NewTestNamespace(testName string) string { - return clabernetesutil.SafeConcatNameKubernetes( + return clabernetesutilkubernetes.SafeConcatNameKubernetes( "e2e", testName, clabernetesutil.RandomString(namespaceRandomPad), diff --git a/util/kubernetes.go b/util/kubernetes.go deleted file mode 100644 index ffdd5781..00000000 --- a/util/kubernetes.go +++ /dev/null @@ -1,115 +0,0 @@ -package util - -import ( - "crypto/sha256" - "encoding/hex" - "os" - "strings" - - "gopkg.in/yaml.v3" - k8scorev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" -) - -const ( - // NameMaxLen is the maximum length for a kubernetes name. 
- NameMaxLen = 63 -) - -// CurrentNamespace returns the current kubernetes namespace as read from the KUBE_NAMESPACE env -// var, or the serviceaccount/namespace file on the instance. -func CurrentNamespace() (string, error) { - namespaceFromEnv := os.Getenv("KUBE_NAMESPACE") - if namespaceFromEnv != "" { - return namespaceFromEnv, nil - } - - namespaceFromFile, err := os.ReadFile( - "/var/run/secrets/kubernetes.io/serviceaccount/namespace", - ) - if err != nil { - return "", err - } - - return string(namespaceFromFile), nil -} - -// MustCurrentNamespace returns the current kubernetes namespace or panics. -func MustCurrentNamespace() string { - namespace, err := CurrentNamespace() - if err != nil { - Panic(err.Error()) - } - - return namespace -} - -// SafeConcatNameKubernetes concats all provided strings into a string joined by "-" - if the final -// string is greater than 63 characters, the string will be shortened, and a hash will be used at -// the end of the string to keep it unique, but safely within allowed lengths. -func SafeConcatNameKubernetes(name ...string) string { - return SafeConcatNameMax(name, NameMaxLen) -} - -// SafeConcatNameMax concats all provided strings into a string joined by "-" - if the final string -// is greater than max characters, the string will be shortened, and a hash will be used at the end -// of the string to keep it unique, but safely within allowed lengths. -func SafeConcatNameMax(name []string, max int) string { - finalName := strings.Join(name, "-") - - if len(finalName) <= max { - return finalName - } - - digest := sha256.Sum256([]byte(finalName)) - - return finalName[0:max-8] + "-" + hex.EncodeToString(digest[0:])[0:7] -} - -type resourceQuantities struct { - CPU string `yaml:"cpu"` - Memory string `yaml:"memory"` -} - -type resourceRequirements struct { - Requests resourceQuantities `yaml:"requests"` - Limits resourceQuantities `yaml:"limits"` -} - -func (r *resourceRequirements) toK8sResourceRequirements() *k8scorev1.ResourceRequirements { - out := &k8scorev1.ResourceRequirements{ - Limits: map[k8scorev1.ResourceName]resource.Quantity{}, - Requests: map[k8scorev1.ResourceName]resource.Quantity{}, - } - - if r.Requests.Memory != "" { - out.Requests["memory"] = resource.MustParse(r.Requests.Memory) - } - - if r.Requests.CPU != "" { - out.Requests["cpu"] = resource.MustParse(r.Requests.CPU) - } - - if r.Limits.Memory != "" { - out.Limits["memory"] = resource.MustParse(r.Limits.Memory) - } - - if r.Limits.CPU != "" { - out.Limits["cpu"] = resource.MustParse(r.Limits.CPU) - } - - return out -} - -// YAMLToK8sResourceRequirements accepts a yaml string that looks suspiciously like k8s resources -// for a container and converts it to k8scorev1.ResourceRequirements. -func YAMLToK8sResourceRequirements(asYAML string) (*k8scorev1.ResourceRequirements, error) { - out := &resourceRequirements{} - - err := yaml.Unmarshal([]byte(asYAML), out) - if err != nil { - return nil, err - } - - return out.toK8sResourceRequirements(), nil -} diff --git a/util/kubernetes/containers.go b/util/kubernetes/containers.go new file mode 100644 index 00000000..13643577 --- /dev/null +++ b/util/kubernetes/containers.go @@ -0,0 +1,33 @@ +package kubernetes + +import ( + "reflect" + + k8scorev1 "k8s.io/api/core/v1" +) + +// ContainersEqual returns true if the existing container slice matches the rendered container slice +// it ignores slice order. 
+func ContainersEqual(existing, rendered []k8scorev1.Container) bool { + if len(existing) != len(rendered) { + return false + } + + for existingIdx := range existing { + var matched bool + + for renderedIdx := range rendered { + if reflect.DeepEqual(existing[existingIdx], rendered[renderedIdx]) { + matched = true + + break + } + } + + if !matched { + return false + } + } + + return true +} diff --git a/util/kubernetes/containers_test.go b/util/kubernetes/containers_test.go new file mode 100644 index 00000000..1e0d5f61 --- /dev/null +++ b/util/kubernetes/containers_test.go @@ -0,0 +1,62 @@ +package kubernetes_test + +import ( + "testing" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + k8scorev1 "k8s.io/api/core/v1" +) + +func TestContainersEqual(t *testing.T) { + cases := []struct { + name string + a []k8scorev1.Container + b []k8scorev1.Container + expected bool + }{ + { + name: "simple-empty", + a: []k8scorev1.Container{}, + b: []k8scorev1.Container{}, + expected: true, + }, + { + name: "simple", + a: []k8scorev1.Container{ + { + Name: "something", + }, + }, + b: []k8scorev1.Container{ + { + Name: "something", + }, + }, + expected: true, + }, + { + name: "different-counts", + a: []k8scorev1.Container{ + { + Name: "something", + }, + }, + b: []k8scorev1.Container{}, + expected: false, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + actual := clabernetesutilkubernetes.ContainersEqual(testCase.a, testCase.b) + if actual != testCase.expected { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +} diff --git a/controllers/meta.go b/util/kubernetes/meta.go similarity index 97% rename from controllers/meta.go rename to util/kubernetes/meta.go index 6f8a6a9d..1eba55b7 100644 --- a/controllers/meta.go +++ b/util/kubernetes/meta.go @@ -1,4 +1,4 @@ -package controllers +package kubernetes // AnnotationsOrLabelsConform returns false if the existing labels/annotations (or really just map) // does *not* have all the keys/values from the expected/rendered labels/annotations. 
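A quick usage sketch for the relocated AnnotationsOrLabelsConform helper above: the rendered map is treated as the set of required keys/values, so an existing object conforms only when it carries every rendered key with the same value, while extra keys on the existing object are tolerated. The map contents below are purely illustrative.

package main

import (
    "fmt"

    clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes"
)

func main() {
    existing := map[string]string{
        "clabernetes/app": "clabernetes",
        "team":            "platform", // extra keys on the live object are fine
    }
    rendered := map[string]string{
        "clabernetes/app": "clabernetes",
    }

    // true: everything that was rendered is present on the existing object
    fmt.Println(clabernetesutilkubernetes.AnnotationsOrLabelsConform(existing, rendered))

    rendered["clabernetes/name"] = "srl1"

    // false: the existing object is now missing a rendered label
    fmt.Println(clabernetesutilkubernetes.AnnotationsOrLabelsConform(existing, rendered))
}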
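Similarly, ContainersEqual from the new util/kubernetes/containers.go compares container slices by deep equality of the elements while ignoring their order, and the length check up front means mismatched container counts fail immediately. A minimal sketch with illustrative container names:

package main

import (
    "fmt"

    clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes"
    k8scorev1 "k8s.io/api/core/v1"
)

func main() {
    existing := []k8scorev1.Container{{Name: "srl1"}, {Name: "srl2"}}
    rendered := []k8scorev1.Container{{Name: "srl2"}, {Name: "srl1"}}

    // true -- same containers, different order
    fmt.Println(clabernetesutilkubernetes.ContainersEqual(existing, rendered))

    // false -- the counts differ, so the slices cannot match
    fmt.Println(clabernetesutilkubernetes.ContainersEqual(existing, rendered[:1]))
}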
diff --git a/util/kubernetes/meta_test.go b/util/kubernetes/meta_test.go new file mode 100644 index 00000000..cf1d6e32 --- /dev/null +++ b/util/kubernetes/meta_test.go @@ -0,0 +1,82 @@ +package kubernetes_test + +import ( + "testing" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" +) + +func TestContainersEqualAnnotationsOrLabelsConform(t *testing.T) { + cases := []struct { + name string + a map[string]string + b map[string]string + expected bool + }{ + { + name: "simple-empty", + a: nil, + b: nil, + expected: true, + }, + { + name: "simple", + a: map[string]string{ + "something": "neat", + }, + b: map[string]string{ + "something": "neat", + }, + expected: true, + }, + { + name: "different-keys", + a: map[string]string{ + "something": "neat", + }, + b: map[string]string{ + "different": "neat", + }, + expected: false, + }, + { + name: "expected-has-more", + a: map[string]string{ + "something": "neat", + }, + b: map[string]string{ + "something": "neat", + "different": "neat", + }, + expected: false, + }, + { + name: "existing-has-more", + a: map[string]string{ + "something": "neat", + "different": "neat", + }, + b: map[string]string{ + "something": "neat", + }, + expected: true, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + actual := clabernetesutilkubernetes.AnnotationsOrLabelsConform( + testCase.a, + testCase.b, + ) + if actual != testCase.expected { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +} diff --git a/util/kubernetes/names.go b/util/kubernetes/names.go new file mode 100644 index 00000000..10287fad --- /dev/null +++ b/util/kubernetes/names.go @@ -0,0 +1,34 @@ +package kubernetes + +import ( + "crypto/sha256" + "encoding/hex" + "strings" +) + +const ( + // NameMaxLen is the maximum length for a kubernetes name. + NameMaxLen = 63 +) + +// SafeConcatNameKubernetes concats all provided strings into a string joined by "-" - if the final +// string is greater than 63 characters, the string will be shortened, and a hash will be used at +// the end of the string to keep it unique, but safely within allowed lengths. +func SafeConcatNameKubernetes(name ...string) string { + return SafeConcatNameMax(name, NameMaxLen) +} + +// SafeConcatNameMax concats all provided strings into a string joined by "-" - if the final string +// is greater than max characters, the string will be shortened, and a hash will be used at the end +// of the string to keep it unique, but safely within allowed lengths. 
+func SafeConcatNameMax(name []string, max int) string { + finalName := strings.Join(name, "-") + + if len(finalName) <= max { + return finalName + } + + digest := sha256.Sum256([]byte(finalName)) + + return finalName[0:max-8] + "-" + hex.EncodeToString(digest[0:])[0:7] +} diff --git a/util/kubernetes/names_test.go b/util/kubernetes/names_test.go new file mode 100644 index 00000000..616257ac --- /dev/null +++ b/util/kubernetes/names_test.go @@ -0,0 +1,96 @@ +package kubernetes_test + +import ( + "testing" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" +) + +func TestSafeConcatNameKubernetes(t *testing.T) { + cases := []struct { + name string + in []string + expected string + }{ + { + name: "simple", + in: []string{"afinename"}, + expected: "afinename", + }, + { + name: "simple-multi-word", + in: []string{"a", "fine", "name"}, + expected: "a-fine-name", + }, + { + name: "over-max-len", + in: []string{ + "a", + "fine", + "name", + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", //nolint:lll + }, + expected: "a-fine-name-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-8fa96d7", + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + actual := clabernetesutilkubernetes.SafeConcatNameKubernetes(testCase.in...) + if actual != testCase.expected { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +} + +func TestSafeConcatNameMax(t *testing.T) { + cases := []struct { + name string + in []string + max int + expected string + }{ + { + name: "simple", + in: []string{"afinename"}, + max: 30, + expected: "afinename", + }, + { + name: "simple-multi-word", + in: []string{"a", "fine", "name"}, + max: 30, + expected: "a-fine-name", + }, + { + name: "over-max-len", + in: []string{ + "a", + "fine", + "name", + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", //nolint:lll + }, + max: 30, + expected: "a-fine-name-xxxxxxxxxx-8fa96d7", + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + actual := clabernetesutilkubernetes.SafeConcatNameMax(testCase.in, testCase.max) + if actual != testCase.expected { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +} diff --git a/util/kubernetes/namespace.go b/util/kubernetes/namespace.go new file mode 100644 index 00000000..bcdc9aa8 --- /dev/null +++ b/util/kubernetes/namespace.go @@ -0,0 +1,35 @@ +package kubernetes + +import ( + "os" + + clabernetesutil "github.com/srl-labs/clabernetes/util" +) + +// CurrentNamespace returns the current kubernetes namespace as read from the KUBE_NAMESPACE env +// var, or the serviceaccount/namespace file on the instance. +func CurrentNamespace() (string, error) { + namespaceFromEnv := os.Getenv("KUBE_NAMESPACE") + if namespaceFromEnv != "" { + return namespaceFromEnv, nil + } + + namespaceFromFile, err := os.ReadFile( + "/var/run/secrets/kubernetes.io/serviceaccount/namespace", + ) + if err != nil { + return "", err + } + + return string(namespaceFromFile), nil +} + +// MustCurrentNamespace returns the current kubernetes namespace or panics. 
+func MustCurrentNamespace() string { + namespace, err := CurrentNamespace() + if err != nil { + clabernetesutil.Panic(err.Error()) + } + + return namespace +} diff --git a/util/kubernetes/resources.go b/util/kubernetes/resources.go new file mode 100644 index 00000000..8e09fbb6 --- /dev/null +++ b/util/kubernetes/resources.go @@ -0,0 +1,55 @@ +package kubernetes + +import ( + "gopkg.in/yaml.v3" + k8scorev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +type resourceQuantities struct { + CPU string `yaml:"cpu"` + Memory string `yaml:"memory"` +} + +type resourceRequirements struct { + Requests resourceQuantities `yaml:"requests"` + Limits resourceQuantities `yaml:"limits"` +} + +func (r *resourceRequirements) toK8sResourceRequirements() *k8scorev1.ResourceRequirements { + out := &k8scorev1.ResourceRequirements{ + Limits: map[k8scorev1.ResourceName]resource.Quantity{}, + Requests: map[k8scorev1.ResourceName]resource.Quantity{}, + } + + if r.Requests.Memory != "" { + out.Requests["memory"] = resource.MustParse(r.Requests.Memory) + } + + if r.Requests.CPU != "" { + out.Requests["cpu"] = resource.MustParse(r.Requests.CPU) + } + + if r.Limits.Memory != "" { + out.Limits["memory"] = resource.MustParse(r.Limits.Memory) + } + + if r.Limits.CPU != "" { + out.Limits["cpu"] = resource.MustParse(r.Limits.CPU) + } + + return out +} + +// YAMLToK8sResourceRequirements accepts a yaml string that looks suspiciously like k8s resources +// for a container and converts it to k8scorev1.ResourceRequirements. +func YAMLToK8sResourceRequirements(asYAML string) (*k8scorev1.ResourceRequirements, error) { + out := &resourceRequirements{} + + err := yaml.Unmarshal([]byte(asYAML), out) + if err != nil { + return nil, err + } + + return out.toK8sResourceRequirements(), nil +} diff --git a/util/kubernetes/resources_test.go b/util/kubernetes/resources_test.go new file mode 100644 index 00000000..933350dc --- /dev/null +++ b/util/kubernetes/resources_test.go @@ -0,0 +1,75 @@ +package kubernetes_test + +import ( + "reflect" + "testing" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + k8scorev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestYAMLToK8sResourceRequirements(t *testing.T) { + cases := []struct { + name string + in string + expected *k8scorev1.ResourceRequirements + }{ + { + name: "simple", + in: `--- +requests: + memory: 128Mi + cpu: 50m +`, + expected: &k8scorev1.ResourceRequirements{ + Limits: k8scorev1.ResourceList{}, + Requests: k8scorev1.ResourceList{ + "memory": resource.MustParse("128Mi"), + "cpu": resource.MustParse("50m"), + }, + }, + }, + { + name: "simple", + in: `--- +requests: + memory: 128Mi + cpu: 50m +limits: + memory: 256Mi + cpu: 100m +`, + expected: &k8scorev1.ResourceRequirements{ + Limits: k8scorev1.ResourceList{ + "memory": resource.MustParse("256Mi"), + "cpu": resource.MustParse("100m"), + }, + Requests: k8scorev1.ResourceList{ + "memory": resource.MustParse("128Mi"), + "cpu": resource.MustParse("50m"), + }, + }, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + actual, err := clabernetesutilkubernetes.YAMLToK8sResourceRequirements(testCase.in) + if err != nil { + t.Fatalf( + "failed calling YAMLToK8sResourceRequirements, error: %s", err, + ) + } + + if !reflect.DeepEqual(actual, testCase.expected) { + clabernetestesthelper.FailOutput(t, actual, 
testCase.expected) + } + }) + } +} diff --git a/util/kubernetes/volumes.go b/util/kubernetes/volumes.go new file mode 100644 index 00000000..f3815b7c --- /dev/null +++ b/util/kubernetes/volumes.go @@ -0,0 +1,14 @@ +package kubernetes + +import k8scorev1 "k8s.io/api/core/v1" + +// VolumeAlreadyMounted checks if the given volumeName is already in the existingVolumes. +func VolumeAlreadyMounted(volumeName string, existingVolumes []k8scorev1.Volume) bool { + for idx := range existingVolumes { + if volumeName == existingVolumes[idx].Name { + return true + } + } + + return false +} diff --git a/util/kubernetes/volumes_test.go b/util/kubernetes/volumes_test.go new file mode 100644 index 00000000..bdeb4e52 --- /dev/null +++ b/util/kubernetes/volumes_test.go @@ -0,0 +1 @@ +package kubernetes_test diff --git a/util/kubernetes_test.go b/util/kubernetes_test.go deleted file mode 100644 index 7128d8d8..00000000 --- a/util/kubernetes_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package util_test - -import ( - "reflect" - "testing" - - "k8s.io/apimachinery/pkg/api/resource" - - k8scorev1 "k8s.io/api/core/v1" - - clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" - clabernetesutil "github.com/srl-labs/clabernetes/util" -) - -func TestSafeConcatNameKubernetes(t *testing.T) { - cases := []struct { - name string - in []string - expected string - }{ - { - name: "simple", - in: []string{"afinename"}, - expected: "afinename", - }, - { - name: "simple-multi-word", - in: []string{"a", "fine", "name"}, - expected: "a-fine-name", - }, - { - name: "over-max-len", - in: []string{ - "a", - "fine", - "name", - "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", //nolint:lll - }, - expected: "a-fine-name-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-8fa96d7", - }, - } - - for _, tc := range cases { - t.Logf("%s: starting", tc.name) - - actual := clabernetesutil.SafeConcatNameKubernetes(tc.in...) 
- if actual != tc.expected { - clabernetestesthelper.FailOutput(t, actual, tc.expected) - } - } -} - -func TestSafeConcatNameMax(t *testing.T) { - cases := []struct { - name string - in []string - max int - expected string - }{ - { - name: "simple", - in: []string{"afinename"}, - max: 30, - expected: "afinename", - }, - { - name: "simple-multi-word", - in: []string{"a", "fine", "name"}, - max: 30, - expected: "a-fine-name", - }, - { - name: "over-max-len", - in: []string{ - "a", - "fine", - "name", - "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", //nolint:lll - }, - max: 30, - expected: "a-fine-name-xxxxxxxxxx-8fa96d7", - }, - } - - for _, tc := range cases { - t.Logf("%s: starting", tc.name) - - actual := clabernetesutil.SafeConcatNameMax(tc.in, tc.max) - if actual != tc.expected { - clabernetestesthelper.FailOutput(t, actual, tc.expected) - } - } -} - -func TestYAMLToK8sResourceRequirements(t *testing.T) { - cases := []struct { - name string - in string - expected *k8scorev1.ResourceRequirements - }{ - { - name: "simple", - in: `--- -requests: - memory: 128Mi - cpu: 50m -`, - expected: &k8scorev1.ResourceRequirements{ - Limits: k8scorev1.ResourceList{}, - Requests: k8scorev1.ResourceList{ - "memory": resource.MustParse("128Mi"), - "cpu": resource.MustParse("50m"), - }, - }, - }, - { - name: "simple", - in: `--- -requests: - memory: 128Mi - cpu: 50m -limits: - memory: 256Mi - cpu: 100m -`, - expected: &k8scorev1.ResourceRequirements{ - Limits: k8scorev1.ResourceList{ - "memory": resource.MustParse("256Mi"), - "cpu": resource.MustParse("100m"), - }, - Requests: k8scorev1.ResourceList{ - "memory": resource.MustParse("128Mi"), - "cpu": resource.MustParse("50m"), - }, - }, - }, - } - - for _, testCase := range cases { - t.Run( - testCase.name, - func(t *testing.T) { - t.Logf("%s: starting", testCase.name) - - actual, err := clabernetesutil.YAMLToK8sResourceRequirements(testCase.in) - if err != nil { - t.Fatalf( - "failed calling YAMLToK8sResourceRequirements, error: %s", err, - ) - } - - if !reflect.DeepEqual(actual, testCase.expected) { - clabernetestesthelper.FailOutput(t, actual, testCase.expected) - } - }) - } -} diff --git a/util/objectdiffer.go b/util/objectdiffer.go new file mode 100644 index 00000000..ca03ef70 --- /dev/null +++ b/util/objectdiffer.go @@ -0,0 +1,50 @@ +package util + +// ObjectDiffer holds objets of type T -- used for comparing current, missing, and extraneous +// objects in the cluster. +type ObjectDiffer[T any] struct { + // Current objects by endpoint name + Current map[string]T + // Missing objects by endpoint name + Missing []string + // Extra objects that should be pruned + Extra []T +} + +// CurrentObjectNames returns a slice of the names of the current objects. +func (d *ObjectDiffer[T]) CurrentObjectNames() []string { + names := make([]string, len(d.Current)) + + var idx int + + for k := range d.Current { + names[idx] = k + + idx++ + } + + return names +} + +// SetMissing sets the missing objects based on the slice of all expected object names. +func (d *ObjectDiffer[T]) SetMissing(allExpectedNames []string) { + d.Missing = StringSliceDifference( + d.CurrentObjectNames(), + allExpectedNames, + ) +} + +// SetExtra sets the extra objects based on the slice of all expected object names and the current +// objects -- `Current` must be set prior to calling this or things will be weird. 
+func (d *ObjectDiffer[T]) SetExtra(allExpectedNames []string) { + extraNames := StringSliceDifference( + allExpectedNames, + d.CurrentObjectNames(), + ) + + d.Extra = make([]T, len(extraNames)) + + for idx, extraName := range extraNames { + d.Extra[idx] = d.Current[extraName] + } +} diff --git a/util/objectdiffer_test.go b/util/objectdiffer_test.go new file mode 100644 index 00000000..ac26c41e --- /dev/null +++ b/util/objectdiffer_test.go @@ -0,0 +1,122 @@ +package util_test + +import ( + "testing" + + clabernetesutil "github.com/srl-labs/clabernetes/util" + + clabernetestesthelper "github.com/srl-labs/clabernetes/testhelper" +) + +func TestObjectDifferGetCurrentObjectNames(t *testing.T) { + cases := []struct { + name string + current map[string]string + expected []string + }{ + { + name: "simple", + current: map[string]string{ + "one": "something", + "two": "neato", + }, + expected: []string{"one", "two"}, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + od := clabernetesutil.ObjectDiffer[string]{ + Current: testCase.current, + } + + actual := od.CurrentObjectNames() + + if !clabernetesutil.StringSliceContainsAll(actual, testCase.expected) { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +} + +func TestObjectDifferSetMissing(t *testing.T) { + cases := []struct { + name string + current map[string]string + allExpected []string + expected []string + }{ + { + name: "simple", + current: map[string]string{ + "one": "something", + "two": "neato", + }, + allExpected: []string{"one", "two", "seven", "eleven"}, + expected: []string{"seven", "eleven"}, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + od := clabernetesutil.ObjectDiffer[string]{ + Current: testCase.current, + } + + od.SetMissing(testCase.allExpected) + + actual := od.Missing + + if !clabernetesutil.StringSliceContainsAll(actual, testCase.expected) { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +} + +func TestObjectDifferSeExtra(t *testing.T) { + cases := []struct { + name string + current map[string]string + allExpected []string + expected []string + }{ + { + name: "simple", + current: map[string]string{ + "one": "something", + "two": "neato", + }, + allExpected: []string{"one", "seven", "eleven"}, + expected: []string{"neato"}, + }, + } + + for _, testCase := range cases { + t.Run( + testCase.name, + func(t *testing.T) { + t.Logf("%s: starting", testCase.name) + + od := clabernetesutil.ObjectDiffer[string]{ + Current: testCase.current, + } + + od.SetExtra(testCase.allExpected) + + actual := od.Extra + + if !clabernetesutil.StringSliceContainsAll(actual, testCase.expected) { + clabernetestesthelper.FailOutput(t, actual, testCase.expected) + } + }) + } +}
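The generic ObjectDiffer introduced in util/objectdiffer.go centralizes the current/missing/extra bookkeeping that the deleted topology service code above did by hand for each resource type: Current holds owned objects keyed by name, SetMissing records expected names that have no current object, and SetExtra collects current objects that are no longer expected and should be pruned. A rough usage sketch for services (node names are illustrative):

package main

import (
    "fmt"

    clabernetesutil "github.com/srl-labs/clabernetes/util"
    k8scorev1 "k8s.io/api/core/v1"
)

func main() {
    // services currently owned in the cluster, keyed by node name
    diff := clabernetesutil.ObjectDiffer[*k8scorev1.Service]{
        Current: map[string]*k8scorev1.Service{
            "srl1": {},
            "srl9": {},
        },
    }

    // nodes that should have a service according to the topology
    expected := []string{"srl1", "srl2"}

    diff.SetMissing(expected) // Missing == ["srl2"] -> render and create
    diff.SetExtra(expected)   // Extra holds the "srl9" service -> prune

    fmt.Println(diff.Missing, len(diff.Extra))
}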
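One more sketch, for YAMLToK8sResourceRequirements from the new util/kubernetes/resources.go: it parses a requests/limits YAML snippet (the same shape used in the resources_test.go fixtures above) into a k8scorev1.ResourceRequirements, and any quantity not set in the YAML is simply left out rather than defaulted.

package main

import (
    "fmt"

    clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes"
)

func main() {
    requirements, err := clabernetesutilkubernetes.YAMLToK8sResourceRequirements(`---
requests:
  memory: 128Mi
  cpu: 50m
limits:
  memory: 256Mi
`)
    if err != nil {
        panic(err)
    }

    // limits only contains memory here; cpu was never specified, so it is absent
    fmt.Printf("requests=%v limits=%v\n", requirements.Requests, requirements.Limits)
}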