From cc3ce10d6514ddc5668bd6b01696f62458ec6b85 Mon Sep 17 00:00:00 2001
From: mgianluc
Date: Fri, 11 Oct 2024 16:34:30 +0200
Subject: [PATCH] Bearer token

Each API expects a bearer token to be present in the authorization header, in the form

```
Authorization: Bearer <token>
```

Each request will:

1. Get the token from the authorization header
2. Validate the token by querying the ServerVersion
3. Get the user from the token

From this point on, the behavior differs from query to query:

1. Queries that fetch SveltosClusters or CAPI Clusters first verify whether the user has permission to list cluster instances in all namespaces. If so, the data cached by the manager is returned. If not, the request walks all existing clusters, checks for each one whether the user is allowed to get it, and returns only the clusters the user has permission for.
2. Queries that get helm charts/resources/profiles for a given cluster first verify whether the user has permission to get that specific cluster. The result is returned only if that permission is in place.
---
 Makefile | 2 +-
 README.md | 39 ++++
 cmd/main.go | 2 +-
 go.mod | 2 +-
 go.sum | 4 +-
 .../sveltoscluster_controller_test.go | 8 +-
 internal/server/http.go | 167 +++++++++++++-
 internal/server/kubeconfig.go | 209 ++++++++++++++++++
 internal/server/manager.go | 121 +++++++++-
 internal/server/manager_test.go | 43 ++--
 10 files changed, 559 insertions(+), 38 deletions(-)
 create mode 100644 internal/server/kubeconfig.go

diff --git a/Makefile b/Makefile
index d277eb4..1c8dc92 100644
--- a/Makefile
+++ b/Makefile
@@ -68,7 +68,7 @@ KIND := $(TOOLS_BIN_DIR)/kind
 KUBECTL := $(TOOLS_BIN_DIR)/kubectl
 
 GOLANGCI_LINT_VERSION := "v1.59.0"
-CLUSTERCTL_VERSION := "v1.8.3"
+CLUSTERCTL_VERSION := "v1.8.4"
 
 KUSTOMIZE_VER := v5.3.0
 KUSTOMIZE_BIN := kustomize
diff --git a/README.md b/README.md
index 9470eb6..e34c96b 100644
--- a/README.md
+++ b/README.md
@@ -12,6 +12,12 @@ Please refere to sveltos [documentation](https://projectsveltos.github.io/svelto
 ## What this repository is
 This repo contains a service that exposes all APIs used by Sveltos frontend.
 
+### Authorization Header
+
+Using the authorization header is the only way to contact the backend as a user when accessing it over HTTP.
+To use it, simply pass `Authorization: Bearer <token>` in every request.
+To create a sample user and get its token, see the [How to get token](#how-to-get-token) guide.
+
 ### Get ClusterAPI powered clusters
 
 ```/capiclusters```
@@ -260,6 +266,39 @@ returns
 }
 ```
 
+### How to get token
+
+First, create a service account in the desired namespace:
+
+```
+kubectl create sa <service_account> -n <namespace>
+```
+
+Give the service account the permissions it needs by binding it to a cluster role:
+
+```
+kubectl create clusterrolebinding <binding_name> --clusterrole <role_name> --serviceaccount <namespace>:<service_account>
+```
+
+where:
+
+- **binding_name** is a descriptive name for the rolebinding.
+- **role_name** is one of the default cluster roles (or a custom cluster role) specifying the permissions.
+- **namespace** is the service account's namespace.
+- **service_account** is the service account that the permissions are being associated with.
+
+Next, create a bearer token for the service account.
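For reference, the service account and binding steps above could look like this for the running example used below (a service account named sveltos in the default namespace). The sveltos-access binding name and the built-in view cluster role are only illustrative; substitute a cluster role that actually grants get/list permissions on SveltosClusters and CAPI Clusters:

```
kubectl create sa sveltos -n default
kubectl create clusterrolebinding sveltos-access --clusterrole view --serviceaccount default:sveltos
```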
Using the running example of a service account named, sveltos in the _default_ namespace: + +``` +kubectl create token sveltos --duration=24h +``` + +it should print somthing like + +``` +eyJhbGciOiJSUzI1NiIsImtpZCI6IkVsYW8zRU9BMWw3UTZ2QUpjNGFRLXljcTU4M1NhaXBZd1ZNWXJySkVtMTAifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzI4NzE3NjA0LCJpYXQiOjE3Mjg2MzEyMDQsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiZmQ1OWU4OTctODZlNS00MDQ4LWEwZjAtMDMxYjM5MjVlYjQwIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJwbGF0Zm9ybSIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJtZ2lhbmx1YyIsInVpZCI6ImJjZWUwZDEwLWM2MTQtNDIzNi1iNmZmLTAyYWU2M2M1MjcxZiJ9fSwibmJmIjoxNzI4NjMxMjA0LCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6cGxhdGZvcm06bWdpYW5sdWMifQ.JlEN38Whyb4mlNsWkC4RAQ82dVUJmWvmdvar7VVxEw2SUgoQthQQPsV-l28bGYpuQspFlsdaO2JRdhm6MGctlMJziweHHm3PNv_RBnFMPRQ01y7ciaRZXE7HEB3sAndvBEQKNWyo4wmmyRnEE2tR79ICQRTLmuWO17MjRIZFChXMHsCsam5OsuE6mE1fj3RSUSbvfRbQwrsTcWOrnYxzquyNVyJyOKxQ97Nm175rez5x9EflHPwueYu5FmNgz3cxMsdkHwkrMnhMqMyNN8WBqKUrju-gPJ9GB-cOcrR_38JyeQBPXYTo9J0tueIWEyaiwKvmPqAsnyHKPT5p-7hFCQ +``` + ## Contributing ❤️ Your contributions are always welcome! If you want to contribute, have questions, noticed any bug or want to get the latest project news, you can connect with us in the following ways: diff --git a/cmd/main.go b/cmd/main.go index 3f3d19b..a8f715e 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -145,7 +145,7 @@ func main() { go startClusterController(ctx, mgr, setupLog) go startClusterSummaryController(mgr) - server.InitializeManagerInstance(ctx, mgr.GetClient(), scheme, + server.InitializeManagerInstance(ctx, mgr.GetConfig(), mgr.GetClient(), scheme, httpPort, ctrl.Log.WithName("gin")) setupLog.Info("starting manager") diff --git a/go.mod b/go.mod index 95213cb..d15c031 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( k8s.io/client-go v0.31.1 k8s.io/component-base v0.31.1 k8s.io/klog/v2 v2.130.1 - sigs.k8s.io/cluster-api v1.8.3 + sigs.k8s.io/cluster-api v1.8.4 sigs.k8s.io/controller-runtime v0.19.0 ) diff --git a/go.sum b/go.sum index 02b5f3d..694a499 100644 --- a/go.sum +++ b/go.sum @@ -293,8 +293,8 @@ k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/cluster-api v1.8.3 h1:N6i25rF5QMadwVg2UPfuO6CzmNXjqnF2r1MAO+kcsro= -sigs.k8s.io/cluster-api v1.8.3/go.mod h1:pXv5LqLxuIbhGIXykyNKiJh+KrLweSBajVHHitPLyoY= +sigs.k8s.io/cluster-api v1.8.4 h1:jBKQH1H/HUdUFk8T6qDzIxZJfWw1F5ZP0ZpYQJDmTHs= +sigs.k8s.io/cluster-api v1.8.4/go.mod h1:pXv5LqLxuIbhGIXykyNKiJh+KrLweSBajVHHitPLyoY= sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/internal/controller/sveltoscluster_controller_test.go b/internal/controller/sveltoscluster_controller_test.go index 7e541b9..9d09795 100644 --- a/internal/controller/sveltoscluster_controller_test.go +++ b/internal/controller/sveltoscluster_controller_test.go @@ -67,7 +67,7 @@ var _ = Describe("SveltosClusterReconciler", func() { ctx, cancel 
:= context.WithCancel(context.Background()) defer cancel() - server.InitializeManagerInstance(ctx, c, scheme, httpPort, logger) + server.InitializeManagerInstance(ctx, nil, c, scheme, httpPort, logger) reconciler := getSveltosClusterReconciler(c) @@ -89,7 +89,8 @@ var _ = Describe("SveltosClusterReconciler", func() { } manager := server.GetManagerInstance() - clusters := manager.GetManagedSveltosClusters() + clusters, err := manager.GetManagedSveltosClusters(context.TODO(), true, randomString()) + Expect(err).To(BeNil()) _, ok := clusters[*cluster] Expect(ok).To(BeTrue()) @@ -101,7 +102,8 @@ var _ = Describe("SveltosClusterReconciler", func() { }) Expect(err).ToNot(HaveOccurred()) - clusters = manager.GetManagedSveltosClusters() + clusters, err = manager.GetManagedSveltosClusters(context.TODO(), true, randomString()) + Expect(err).To(BeNil()) _, ok = clusters[*cluster] Expect(ok).To(BeFalse()) diff --git a/internal/server/http.go b/internal/server/http.go index a27b928..2079ffe 100644 --- a/internal/server/http.go +++ b/internal/server/http.go @@ -18,6 +18,7 @@ package server import ( "context" + "errors" "fmt" "net/http" "sort" @@ -38,6 +39,10 @@ const ( maxItems = 6 ) +type Token struct { + Value string `json:"token,omitempty"` +} + var ( ginLogger logr.Logger @@ -50,12 +55,32 @@ var ( if err != nil { ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("bad request %s: %v", c.Request.URL, err)) _ = c.AbortWithError(http.StatusBadRequest, err) + return } ginLogger.V(logs.LogDebug).Info(fmt.Sprintf("filters: namespace %q name %q labels %q", filters.Namespace, filters.name, filters.labelSelector)) + user, err := validateToken(c) + if err != nil { + return + } + manager := GetManagerInstance() - clusters := manager.GetManagedCAPIClusters() + + canListAll, err := manager.canListCAPIClusters(user) + if err != nil { + ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("failed to verify permissions %s: %v", c.Request.URL, err)) + _ = c.AbortWithError(http.StatusUnauthorized, err) + return + } + + clusters, err := manager.GetManagedCAPIClusters(c.Request.Context(), canListAll, user) + if err != nil { + ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("failed to verify permissions %s: %v", c.Request.URL, err)) + _ = c.AbortWithError(http.StatusUnauthorized, err) + return + } + managedClusterData := getManagedClusterData(clusters, filters) sort.Sort(managedClusterData) @@ -63,6 +88,7 @@ var ( if err != nil { ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("bad request %s: %v", c.Request.URL, err)) _ = c.AbortWithError(http.StatusBadRequest, err) + return } response := ClusterResult{ @@ -83,12 +109,32 @@ var ( if err != nil { ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("bad request %s: %v", c.Request.URL, err)) _ = c.AbortWithError(http.StatusBadRequest, err) + return } ginLogger.V(logs.LogDebug).Info(fmt.Sprintf("filters: namespace %q name %q labels %q", filters.Namespace, filters.name, filters.labelSelector)) + user, err := validateToken(c) + if err != nil { + return + } + manager := GetManagerInstance() - clusters := manager.GetManagedSveltosClusters() + + canListAll, err := manager.canListSveltosClusters(user) + if err != nil { + ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("failed to verify permissions %s: %v", c.Request.URL, err)) + _ = c.AbortWithError(http.StatusUnauthorized, err) + return + } + + clusters, err := manager.GetManagedSveltosClusters(c.Request.Context(), canListAll, user) + if err != nil { + ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("failed to verify permissions %s: %v", c.Request.URL, err)) + _ = 
c.AbortWithError(http.StatusUnauthorized, err) + return + } + managedClusterData := getManagedClusterData(clusters, filters) sort.Sort(managedClusterData) @@ -96,6 +142,7 @@ var ( if err != nil { ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("bad request %s: %v", c.Request.URL, err)) _ = c.AbortWithError(http.StatusBadRequest, err) + return } response := ClusterResult{ @@ -116,12 +163,31 @@ var ( limit, skip := getLimitAndSkipFromQuery(c) ginLogger.V(logs.LogDebug).Info(fmt.Sprintf("limit %d skip %d", limit, skip)) + user, err := validateToken(c) + if err != nil { + return + } + manager := GetManagerInstance() + + canGetCluster, err := manager.canGetCluster(namespace, name, user, clusterType) + if err != nil { + ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("failed to verify permissions %s: %v", c.Request.URL, err)) + _ = c.AbortWithError(http.StatusUnauthorized, err) + return + } + + if !canGetCluster { + _ = c.AbortWithError(http.StatusUnauthorized, errors.New("no permissions to access this cluster")) + return + } + helmCharts, err := manager.getHelmChartsForCluster(c.Request.Context(), namespace, name, clusterType) if err != nil { ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("bad request %s: %v", c.Request.URL, err)) _ = c.AbortWithError(http.StatusBadRequest, err) + return } sort.Slice(helmCharts, func(i, j int) bool { return sortHelmCharts(helmCharts, i, j) @@ -131,6 +197,7 @@ var ( if err != nil { ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("bad request %s: %v", c.Request.URL, err)) _ = c.AbortWithError(http.StatusBadRequest, err) + return } response := HelmReleaseResult{ @@ -150,12 +217,31 @@ var ( ginLogger.V(logs.LogDebug).Info(fmt.Sprintf("cluster %s:%s/%s", clusterType, namespace, name)) ginLogger.V(logs.LogDebug).Info(fmt.Sprintf("limit %d skip %d", limit, skip)) + user, err := validateToken(c) + if err != nil { + return + } + manager := GetManagerInstance() + + canGetCluster, err := manager.canGetCluster(namespace, name, user, clusterType) + if err != nil { + ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("failed to verify permissions %s: %v", c.Request.URL, err)) + _ = c.AbortWithError(http.StatusUnauthorized, err) + return + } + + if !canGetCluster { + _ = c.AbortWithError(http.StatusUnauthorized, errors.New("no permissions to access this cluster")) + return + } + resources, err := manager.getResourcesForCluster(c.Request.Context(), namespace, name, clusterType) if err != nil { ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("bad request %s: %v", c.Request.URL, err)) _ = c.AbortWithError(http.StatusBadRequest, err) + return } sort.Slice(resources, func(i, j int) bool { return sortResources(resources, i, j) @@ -165,6 +251,7 @@ var ( if err != nil { ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("bad request %s: %v", c.Request.URL, err)) _ = c.AbortWithError(http.StatusBadRequest, err) + return } response := ResourceResult{ @@ -186,7 +273,25 @@ var ( ginLogger.V(logs.LogDebug).Info(fmt.Sprintf("limit %d skip %d", limit, skip)) ginLogger.V(logs.LogDebug).Info(fmt.Sprintf("failed %t", failedOnly)) + user, err := validateToken(c) + if err != nil { + return + } + manager := GetManagerInstance() + + canGetCluster, err := manager.canGetCluster(namespace, name, user, clusterType) + if err != nil { + ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("failed to verify permissions %s: %v", c.Request.URL, err)) + _ = c.AbortWithError(http.StatusUnauthorized, err) + return + } + + if !canGetCluster { + _ = c.AbortWithError(http.StatusUnauthorized, errors.New("no permissions to access this cluster")) + return + } + 
clusterProfileStatuses := manager.GetClusterProfileStatusesByCluster(&namespace, &name, clusterType) flattenedProfileStatuses := flattenProfileStatuses(clusterProfileStatuses, failedOnly) @@ -198,6 +303,7 @@ var ( if err != nil { ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("bad request %s: %v", c.Request.URL, err)) _ = c.AbortWithError(http.StatusBadRequest, err) + return } c.JSON(http.StatusOK, gin.H{ @@ -357,3 +463,60 @@ func getClusterFromQuery(c *gin.Context) (namespace, name string, clusterType li c.JSON(http.StatusBadRequest, gin.H{"error": "cluster type is incorrect"}) return } + +func getTokenFromAuthorizationHeader(c *gin.Context) (string, error) { + // Get the authorization header value + authorizationHeader := c.GetHeader("Authorization") + + // Check if the authorization header is present + if authorizationHeader == "" { + errorMsg := "authorization header is missing" + c.JSON(http.StatusUnauthorized, gin.H{"error": errorMsg}) + return "", errors.New(errorMsg) + } + + // Extract the token from the authorization header + // Assuming the authorization header format is "Bearer " + token := authorizationHeader[len("Bearer "):] + // Check if the token is present + if token == "" { + errorMsg := "token is missing" + c.JSON(http.StatusUnauthorized, gin.H{"error": errorMsg}) + return "", errors.New(errorMsg) + } + + return token, nil +} + +// validateToken: +// - gets token from authorization request. Returns an error if missing +// - validate token. Returns an error if this check fails +// - get and return user info. Returns an error if getting user from token fails +func validateToken(c *gin.Context) (string, error) { + token, err := getTokenFromAuthorizationHeader(c) + if err != nil { + ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get token from authorization request. Request %s, error %v", + c.Request.URL, err)) + _ = c.AbortWithError(http.StatusUnauthorized, errors.New("failed to get token from authorization request")) + return "", err + } + + manager := GetManagerInstance() + err = manager.validateToken(token) + if err != nil { + ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("failed to validate token: %v", err)) + _ = c.AbortWithError(http.StatusUnauthorized, errors.New("failed to validate token")) + return "", err + } + + user, err := manager.getUserFromToken(token) + if err != nil { + ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get user from token: %v", err)) + _ = c.AbortWithError(http.StatusUnauthorized, errors.New("failed to get user from token")) + return "", err + } + + ginLogger.V(logs.LogDebug).Info(fmt.Sprintf("user %s", user)) + + return user, nil +} diff --git a/internal/server/kubeconfig.go b/internal/server/kubeconfig.go new file mode 100644 index 0000000..d3c49cd --- /dev/null +++ b/internal/server/kubeconfig.go @@ -0,0 +1,209 @@ +/* +Copyright 2024. projectsveltos.io. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "context" + "fmt" + + authenticationv1 "k8s.io/api/authentication/v1" + authorizationapi "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + authenticationv1client "k8s.io/client-go/kubernetes/typed/authentication/v1" + "k8s.io/client-go/rest" + certutil "k8s.io/client-go/util/cert" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + + libsveltosv1beta1 "github.com/projectsveltos/libsveltos/api/v1beta1" + logs "github.com/projectsveltos/libsveltos/lib/logsettings" +) + +func (m *instance) getKubernetesRestConfig(token string) (*rest.Config, error) { + const ( + rootCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + ) + + tlsClientConfig := rest.TLSClientConfig{} + if _, err := certutil.NewPool(rootCAFile); err != nil { + return nil, err + } else { + tlsClientConfig.CAFile = rootCAFile + } + + return &rest.Config{ + BearerToken: token, + Host: m.config.Host, + TLSClientConfig: tlsClientConfig, + }, nil +} + +func (m *instance) getUserFromToken(token string) (string, error) { + config, err := m.getKubernetesRestConfig(token) + if err != nil { + m.logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get restConfig: %v", err)) + return "", err + } + + authV1Client, err := authenticationv1client.NewForConfig(config) + if err != nil { + return "", err + } + + res, err := authV1Client.SelfSubjectReviews(). + Create(context.TODO(), &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{}) + if err != nil { + return "", err + } + + return res.Status.UserInfo.Username, nil +} + +// canListSveltosClusters returns true if user can list all SveltosClusters in all namespaces +func (m *instance) canListSveltosClusters(user string) (bool, error) { + // Create a Kubernetes clientset + clientset, err := kubernetes.NewForConfig(m.config) + if err != nil { + m.logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get clientset: %v", err)) + return false, err + } + + sar := &authorizationapi.SubjectAccessReview{ + Spec: authorizationapi.SubjectAccessReviewSpec{ + ResourceAttributes: &authorizationapi.ResourceAttributes{ + Verb: "list", + Group: libsveltosv1beta1.GroupVersion.Group, + Version: libsveltosv1beta1.GroupVersion.Version, + Resource: libsveltosv1beta1.SveltosClusterKind, + }, + User: user, + }, + } + + canI, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) + if err != nil { + m.logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to check clientset permissions: %v", err)) + return false, err + } + + return canI.Status.Allowed, nil +} + +// canGetSveltosCluster returns true if user can access SveltosCluster clusterNamespace:clusterName +func (m *instance) canGetSveltosCluster(clusterNamespace, clusterName, user string) (bool, error) { + // Create a Kubernetes clientset + clientset, err := kubernetes.NewForConfig(m.config) + if err != nil { + m.logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get clientset: %v", err)) + return false, err + } + + sar := &authorizationapi.SubjectAccessReview{ + Spec: authorizationapi.SubjectAccessReviewSpec{ + ResourceAttributes: &authorizationapi.ResourceAttributes{ + Verb: "get", + Group: libsveltosv1beta1.GroupVersion.Group, + Version: libsveltosv1beta1.GroupVersion.Version, + Resource: libsveltosv1beta1.SveltosClusterKind, + Namespace: clusterNamespace, + Name: clusterName, + }, + User: user, + }, + } + + canI, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, 
metav1.CreateOptions{}) + if err != nil { + m.logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to check clientset permissions: %v", err)) + return false, err + } + + return canI.Status.Allowed, nil +} + +// canListCAPIClusters returns true if user can list all CAPI Clusters in all namespaces +func (m *instance) canListCAPIClusters(user string) (bool, error) { + // Create a Kubernetes clientset + clientset, err := kubernetes.NewForConfig(m.config) + if err != nil { + m.logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get clientset: %v", err)) + return false, err + } + + sar := &authorizationapi.SubjectAccessReview{ + Spec: authorizationapi.SubjectAccessReviewSpec{ + ResourceAttributes: &authorizationapi.ResourceAttributes{ + Verb: "list", + Group: clusterv1.GroupVersion.Group, + Version: clusterv1.GroupVersion.Version, + Resource: clusterv1.ClusterKind, + }, + User: user, + }, + } + + canI, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) + if err != nil { + m.logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to check clientset permissions: %v", err)) + return false, err + } + + return canI.Status.Allowed, nil +} + +// canGetCAPICluster returns true if user can access CAPI Cluster clusterNamespace:clusterName +func (m *instance) canGetCAPICluster(clusterNamespace, clusterName, user string) (bool, error) { + // Create a Kubernetes clientset + clientset, err := kubernetes.NewForConfig(m.config) + if err != nil { + m.logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get clientset: %v", err)) + return false, err + } + + sar := &authorizationapi.SubjectAccessReview{ + Spec: authorizationapi.SubjectAccessReviewSpec{ + ResourceAttributes: &authorizationapi.ResourceAttributes{ + Verb: "get", + Group: clusterv1.GroupVersion.Group, + Version: clusterv1.GroupVersion.Version, + Resource: clusterv1.ClusterKind, + Namespace: clusterNamespace, + Name: clusterName, + }, + User: user, + }, + } + + canI, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) + if err != nil { + m.logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to check clientset permissions: %v", err)) + return false, err + } + + return canI.Status.Allowed, nil +} + +// canGetCluster verifies whether user has permission to view CAPI/Sveltos Cluster +func (m *instance) canGetCluster(clusterNamespace, clusterName, user string, + clusterType libsveltosv1beta1.ClusterType) (bool, error) { + + if clusterType == libsveltosv1beta1.ClusterTypeCapi { + return m.canGetCAPICluster(clusterNamespace, clusterName, user) + } + + return m.canGetSveltosCluster(clusterNamespace, clusterName, user) +} diff --git a/internal/server/manager.go b/internal/server/manager.go index 05415b3..2c602d8 100644 --- a/internal/server/manager.go +++ b/internal/server/manager.go @@ -18,17 +18,20 @@ package server import ( "context" + "fmt" "sync" + "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/go-logr/logr" - configv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1" libsveltosv1beta1 "github.com/projectsveltos/libsveltos/api/v1beta1" + logs "github.com/projectsveltos/libsveltos/lib/logsettings" ) type ClusterInfo struct { @@ -54,10 +57,12 @@ type ClusterFeatureSummary struct { } type instance struct { + config *rest.Config client client.Client 
scheme *runtime.Scheme clusterMux sync.RWMutex // use a Mutex to update managed Clusters clusterStatusesMux sync.RWMutex // mutex to update cached ClusterSummary instances + logger logr.Logger sveltosClusters map[corev1.ObjectReference]ClusterInfo capiClusters map[corev1.ObjectReference]ClusterInfo @@ -70,14 +75,15 @@ var ( ) // InitializeManagerInstance initializes manager instance -func InitializeManagerInstance(ctx context.Context, c client.Client, scheme *runtime.Scheme, - port string, logger logr.Logger) { +func InitializeManagerInstance(ctx context.Context, config *rest.Config, c client.Client, + scheme *runtime.Scheme, port string, logger logr.Logger) { if managerInstance == nil { lock.Lock() defer lock.Unlock() if managerInstance == nil { managerInstance = &instance{ + config: config, client: c, sveltosClusters: make(map[corev1.ObjectReference]ClusterInfo), capiClusters: make(map[corev1.ObjectReference]ClusterInfo), @@ -85,6 +91,7 @@ func InitializeManagerInstance(ctx context.Context, c client.Client, scheme *run clusterMux: sync.RWMutex{}, clusterStatusesMux: sync.RWMutex{}, scheme: scheme, + logger: logger, } go func() { @@ -98,16 +105,87 @@ func GetManagerInstance() *instance { return managerInstance } -func (m *instance) GetManagedSveltosClusters() map[corev1.ObjectReference]ClusterInfo { - m.clusterMux.RLock() - defer m.clusterMux.RUnlock() - return m.sveltosClusters +func (m *instance) GetManagedSveltosClusters(ctx context.Context, canListAll bool, user string, +) (map[corev1.ObjectReference]ClusterInfo, error) { + + // If user can list all SveltosClusters, return cached data + if canListAll { + m.clusterMux.RLock() + defer m.clusterMux.RUnlock() + return m.sveltosClusters, nil + } + + // If user cannot list all SveltosClusters, run a List so to get only SveltosClusters user has access to + // List (vs using m.sveltosClusters) is intentionally done to avoid taking lock for too long + sveltosClusters := &libsveltosv1beta1.SveltosClusterList{} + err := m.client.List(ctx, sveltosClusters) + if err != nil { + m.logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to list SveltosClusters: %v", err)) + return nil, err + } + + result := map[corev1.ObjectReference]ClusterInfo{} + for i := range sveltosClusters.Items { + sc := &sveltosClusters.Items[i] + ok, err := m.canGetSveltosCluster(sc.Namespace, sc.Name, user) + if err != nil { + continue + } + if ok { + info := ClusterInfo{ + Labels: sc.Labels, + Version: sc.Status.Version, + Ready: sc.Status.Ready, + FailureMessage: sc.Status.FailureMessage, + } + + sveltosClusterInfo := getKeyFromObject(m.scheme, sc) + result[*sveltosClusterInfo] = info + } + } + + return result, nil } -func (m *instance) GetManagedCAPIClusters() map[corev1.ObjectReference]ClusterInfo { - m.clusterMux.RLock() - defer m.clusterMux.RUnlock() - return m.capiClusters +func (m *instance) GetManagedCAPIClusters(ctx context.Context, canListAll bool, user string, +) (map[corev1.ObjectReference]ClusterInfo, error) { + + // If user can list all CAPI Clusters, return cached data + if canListAll { + m.clusterMux.RLock() + defer m.clusterMux.RUnlock() + return m.capiClusters, nil + } + + // If user cannot list all SveltosClusters, run a List so to get only SveltosClusters user has access to + // List (vs using m.capiClusters) is intentionally done to avoid taking lock for too long + clusters := &clusterv1.ClusterList{} + err := m.client.List(ctx, clusters) + if err != nil { + m.logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to list Clusters: %v", err)) + return nil, err + } + 
+ result := map[corev1.ObjectReference]ClusterInfo{} + for i := range clusters.Items { + capiCluster := &clusters.Items[i] + ok, err := m.canGetCAPICluster(capiCluster.Namespace, capiCluster.Name, user) + if err != nil { + continue + } + if ok { + info := ClusterInfo{ + Labels: capiCluster.Labels, + Ready: capiCluster.Status.ControlPlaneReady, + FailureMessage: capiCluster.Status.FailureMessage, + } + + capiClusterInfo := getKeyFromObject(m.scheme, capiCluster) + result[*capiClusterInfo] = info + } + } + + return result, nil } func (m *instance) GetClusterProfileStatuses() map[corev1.ObjectReference]ClusterProfileStatus { @@ -301,3 +379,22 @@ func MapToClusterFeatureSummaries(featureSummaries *[]configv1beta1.FeatureSumma return clusterFeatureSummaries } + +func (m *instance) validateToken(token string) error { + config, err := m.getKubernetesRestConfig(token) + if err != nil { + return err + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return err + } + + _, err = clientset.Discovery().ServerVersion() + if err != nil { + return err + } + + return nil +} diff --git a/internal/server/manager_test.go b/internal/server/manager_test.go index 283e3da..5fb4c0e 100644 --- a/internal/server/manager_test.go +++ b/internal/server/manager_test.go @@ -196,11 +196,12 @@ var _ = Describe("Manager", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - server.InitializeManagerInstance(ctx, c, scheme, randomPort(), logger) + server.InitializeManagerInstance(ctx, nil, c, scheme, randomPort(), logger) manager := server.GetManagerInstance() manager.AddSveltosCluster(sveltosCluster) - clusters := manager.GetManagedSveltosClusters() + clusters, err := manager.GetManagedSveltosClusters(context.TODO(), true, randomString()) + Expect(err).To(BeNil()) v, ok := clusters[*clusterRef] Expect(ok).To(BeTrue()) Expect(reflect.DeepEqual(v, clusterInfo)).To(BeTrue()) @@ -217,22 +218,25 @@ var _ = Describe("Manager", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - server.InitializeManagerInstance(ctx, c, scheme, randomPort(), logger) + server.InitializeManagerInstance(ctx, nil, c, scheme, randomPort(), logger) manager := server.GetManagerInstance() manager.AddSveltosCluster(sveltosCluster) - clusters := manager.GetManagedSveltosClusters() + clusters, err := manager.GetManagedSveltosClusters(context.TODO(), true, randomString()) + Expect(err).To(BeNil()) _, ok := clusters[*clusterRef] Expect(ok).To(BeTrue()) manager.RemoveSveltosCluster(sveltosCluster.Namespace, sveltosCluster.Name) - clusters = manager.GetManagedSveltosClusters() + clusters, err = manager.GetManagedSveltosClusters(context.TODO(), true, randomString()) + Expect(err).To(BeNil()) _, ok = clusters[*clusterRef] Expect(ok).To(BeFalse()) // verify operation is idempotent manager.RemoveSveltosCluster(sveltosCluster.Namespace, sveltosCluster.Name) - clusters = manager.GetManagedSveltosClusters() + clusters, err = manager.GetManagedSveltosClusters(context.TODO(), true, randomString()) + Expect(err).To(BeNil()) _, ok = clusters[*clusterRef] Expect(ok).To(BeFalse()) }) @@ -255,11 +259,12 @@ var _ = Describe("Manager", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - server.InitializeManagerInstance(ctx, c, scheme, randomPort(), logger) + server.InitializeManagerInstance(ctx, nil, c, scheme, randomPort(), logger) manager := server.GetManagerInstance() manager.AddCAPICluster(cluster) - clusters := manager.GetManagedCAPIClusters() + clusters, err 
:= manager.GetManagedCAPIClusters(context.TODO(), true, randomString()) + Expect(err).To(BeNil()) v, ok := clusters[*clusterRef] Expect(ok).To(BeTrue()) Expect(reflect.DeepEqual(v, clusterInfo)).To(BeTrue()) @@ -276,22 +281,25 @@ var _ = Describe("Manager", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - server.InitializeManagerInstance(ctx, c, scheme, randomPort(), logger) + server.InitializeManagerInstance(ctx, nil, c, scheme, randomPort(), logger) manager := server.GetManagerInstance() manager.AddCAPICluster(cluster) - clusters := manager.GetManagedCAPIClusters() + clusters, err := manager.GetManagedCAPIClusters(context.TODO(), true, randomString()) + Expect(err).To(BeNil()) _, ok := clusters[*clusterRef] Expect(ok).To(BeTrue()) manager.RemoveCAPICluster(cluster.Namespace, cluster.Name) - clusters = manager.GetManagedCAPIClusters() + clusters, err = manager.GetManagedCAPIClusters(context.TODO(), true, randomString()) + Expect(err).To(BeNil()) _, ok = clusters[*clusterRef] Expect(ok).To(BeFalse()) // verify operation is idempotent manager.RemoveCAPICluster(cluster.Namespace, cluster.Name) - clusters = manager.GetManagedCAPIClusters() + clusters, err = manager.GetManagedCAPIClusters(context.TODO(), true, randomString()) + Expect(err).To(BeNil()) _, ok = clusters[*clusterRef] Expect(ok).To(BeFalse()) }) @@ -323,7 +331,7 @@ var _ = Describe("Manager", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - server.InitializeManagerInstance(ctx, c, scheme, randomPort(), logger) + server.InitializeManagerInstance(ctx, nil, c, scheme, randomPort(), logger) manager := server.GetManagerInstance() // test it has been added @@ -351,7 +359,7 @@ var _ = Describe("Manager", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - server.InitializeManagerInstance(ctx, c, scheme, randomPort(), logger) + server.InitializeManagerInstance(ctx, nil, c, scheme, randomPort(), logger) manager := server.GetManagerInstance() // test it has been added @@ -375,11 +383,14 @@ var _ = Describe("Manager", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - server.InitializeManagerInstance(ctx, c, scheme, randomPort(), logger) + server.InitializeManagerInstance(ctx, nil, c, scheme, randomPort(), logger) manager := server.GetManagerInstance() // make sure there's already an existing cluster in the manager - Expect(len(manager.GetManagedCAPIClusters()) == 1).To(BeTrue()) + clusters, err := manager.GetManagedCAPIClusters(context.TODO(), true, randomString()) + Expect(err).To(BeNil()) + + Expect(len(clusters) == 1).To(BeTrue()) manager.AddClusterProfileStatus(properClusterSummary) manager.AddClusterProfileStatus(additionalClusterSummary)
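With these changes, every backend endpoint requires the bearer token described in the commit message; requests without a valid token are rejected with 401 Unauthorized. As a rough usage sketch (the backend address and port are placeholders for your deployment; `/capiclusters` is the endpoint documented in the README, and the `limit`/`skip` parameter names are assumed from the getLimitAndSkipFromQuery helper above):

```
# create a short-lived token for the sveltos service account from the README example
TOKEN=$(kubectl create token sveltos --duration=24h)

# call one of the protected endpoints with the bearer token
curl -H "Authorization: Bearer ${TOKEN}" \
  "http://<backend-address>:<port>/capiclusters?limit=10&skip=0"
```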