diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json
index 46a39dcbcbc0..f450b0038733 100644
--- a/api/openapi-spec/swagger.json
+++ b/api/openapi-spec/swagger.json
@@ -20098,6 +20098,14 @@
         "producer"
       ],
       "properties": {
+        "clusterBeforeFailover": {
+          "description": "ClusterBeforeFailover records the clusters where the application was running before failover.",
+          "type": "array",
+          "items": {
+            "type": "string",
+            "default": ""
+          }
+        },
         "creationTimestamp": {
           "description": "CreationTimestamp is a timestamp representing the server time when this object was created. Clients should not set this value to avoid the time inconsistency issue. It is represented in RFC3339 form(like '2021-04-25T10:02:10Z') and is in UTC.\n\nPopulated by the system. Read-only.",
           "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
diff --git a/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml b/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml
index cc9404713de3..55421bbd9802 100644
--- a/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml
+++ b/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml
@@ -401,6 +401,12 @@ spec:
                       description: GracefulEvictionTask represents a graceful eviction
                         task.
                       properties:
+                        clusterBeforeFailover:
+                          description: ClusterBeforeFailover records the clusters
+                            where the application was running before failover.
+                          items:
+                            type: string
+                          type: array
                         creationTimestamp:
                           description: |-
                             CreationTimestamp is a timestamp representing the server time when this object was
diff --git a/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml b/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml
index 4e4773ed1c21..671d56f460df 100644
--- a/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml
+++ b/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml
@@ -401,6 +401,12 @@ spec:
                       description: GracefulEvictionTask represents a graceful eviction
                         task.
                       properties:
+                        clusterBeforeFailover:
+                          description: ClusterBeforeFailover records the clusters
+                            where the application was running before failover.
+                          items:
+                            type: string
+                          type: array
                         creationTimestamp:
                           description: |-
                             CreationTimestamp is a timestamp representing the server time when this object was
diff --git a/pkg/apis/work/v1alpha2/binding_types.go b/pkg/apis/work/v1alpha2/binding_types.go
index d7c0baf9d3df..9960f9e6ed42 100644
--- a/pkg/apis/work/v1alpha2/binding_types.go
+++ b/pkg/apis/work/v1alpha2/binding_types.go
@@ -300,6 +300,9 @@ type GracefulEvictionTask struct {
 	// Populated by the system. Read-only.
 	// +optional
 	CreationTimestamp *metav1.Time `json:"creationTimestamp,omitempty"`
+
+	// ClusterBeforeFailover records the clusters where the application was running before failover.
+	ClusterBeforeFailover []string `json:"clusterBeforeFailover,omitempty"`
 }
 
 // BindingSnapshot is a snapshot of a ResourceBinding or ClusterResourceBinding.
diff --git a/pkg/apis/work/v1alpha2/binding_types_helper.go b/pkg/apis/work/v1alpha2/binding_types_helper.go
index 01b0366dab63..a0da4ec0bb76 100644
--- a/pkg/apis/work/v1alpha2/binding_types_helper.go
+++ b/pkg/apis/work/v1alpha2/binding_types_helper.go
@@ -20,12 +20,14 @@ import policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 
 // TaskOptions represents options for GracefulEvictionTasks.
 type TaskOptions struct {
-	purgeMode          policyv1alpha1.PurgeMode
-	producer           string
-	reason             string
-	message            string
-	gracePeriodSeconds *int32
-	suppressDeletion   *bool
+	purgeMode              policyv1alpha1.PurgeMode
+	producer               string
+	reason                 string
+	message                string
+	gracePeriodSeconds     *int32
+	suppressDeletion       *bool
+	preservedLabelState    map[string]string
+	clustersBeforeFailover []string
 }
 
 // Option configures a TaskOptions
@@ -83,6 +85,20 @@ func WithSuppressDeletion(suppressDeletion *bool) Option {
 	}
 }
 
+// WithPreservedLabelState sets the preservedLabelState for TaskOptions
+func WithPreservedLabelState(preservedLabelState map[string]string) Option {
+	return func(o *TaskOptions) {
+		o.preservedLabelState = preservedLabelState
+	}
+}
+
+// WithClustersBeforeFailover sets the clustersBeforeFailover for TaskOptions
+func WithClustersBeforeFailover(clustersBeforeFailover []string) Option {
+	return func(o *TaskOptions) {
+		o.clustersBeforeFailover = clustersBeforeFailover
+	}
+}
+
 // TargetContains checks if specific cluster present on the target list.
 func (s *ResourceBindingSpec) TargetContains(name string) bool {
 	for i := range s.Clusters {
@@ -163,13 +179,15 @@ func (s *ResourceBindingSpec) GracefulEvictCluster(name string, options *TaskOpt
 	// build eviction task
 	evictingCluster := evictCluster.DeepCopy()
 	evictionTask := GracefulEvictionTask{
-		FromCluster:        evictingCluster.Name,
-		PurgeMode:          options.purgeMode,
-		Reason:             options.reason,
-		Message:            options.message,
-		Producer:           options.producer,
-		GracePeriodSeconds: options.gracePeriodSeconds,
-		SuppressDeletion:   options.suppressDeletion,
+		FromCluster:           evictingCluster.Name,
+		PurgeMode:             options.purgeMode,
+		Reason:                options.reason,
+		Message:               options.message,
+		Producer:              options.producer,
+		GracePeriodSeconds:    options.gracePeriodSeconds,
+		SuppressDeletion:      options.suppressDeletion,
+		PreservedLabelState:   options.preservedLabelState,
+		ClusterBeforeFailover: options.clustersBeforeFailover,
 	}
 	if evictingCluster.Replicas > 0 {
 		evictionTask.Replicas = &evictingCluster.Replicas
diff --git a/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go
index 559df1a70ad5..55a77fee23e6 100644
--- a/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go
+++ b/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go
@@ -160,6 +160,11 @@ func (in *GracefulEvictionTask) DeepCopyInto(out *GracefulEvictionTask) {
 		in, out := &in.CreationTimestamp, &out.CreationTimestamp
 		*out = (*in).DeepCopy()
 	}
+	if in.ClusterBeforeFailover != nil {
+		in, out := &in.ClusterBeforeFailover, &out.ClusterBeforeFailover
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	return
 }
 
@@ -441,6 +446,18 @@ func (in *TaskOptions) DeepCopyInto(out *TaskOptions) {
 		*out = new(bool)
 		**out = **in
 	}
+	if in.preservedLabelState != nil {
+		in, out := &in.preservedLabelState, &out.preservedLabelState
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.clustersBeforeFailover != nil {
+		in, out := &in.clustersBeforeFailover, &out.clustersBeforeFailover
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	return
 }
 
diff --git a/pkg/controllers/applicationfailover/common.go b/pkg/controllers/applicationfailover/common.go
index f152b95e15c9..55a675d9dc0c 100644
--- a/pkg/controllers/applicationfailover/common.go
+++ b/pkg/controllers/applicationfailover/common.go
@@ -17,12 +17,19 @@ limitations under the License.
 
 package applicationfailover
 
 import (
+	"bytes"
+	"encoding/json"
+	"fmt"
 	"sync"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/util/jsonpath"
+	"k8s.io/klog/v2"
+	"k8s.io/utils/ptr"
 
 	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
+	"github.com/karmada-io/karmada/pkg/features"
 )
@@ -117,3 +124,114 @@ func distinguishUnhealthyClustersWithOthers(aggregatedStatusItems []workv1alpha2
 
 	return unhealthyClusters, others
 }
+
+// buildPreservedLabelState evaluates every state preservation rule against the
+// collected raw status and returns the preserved values keyed by alias label name.
+func buildPreservedLabelState(statePreservation *policyv1alpha1.StatePreservation, rawStatus []byte) (map[string]string, error) {
+	results := make(map[string]string, len(statePreservation.Rules))
+	for _, rule := range statePreservation.Rules {
+		value, err := parseJSONValue(rawStatus, rule.JSONPath)
+		if err != nil {
+			klog.Errorf("Failed to parse value with jsonPath(%s) from status(%v), error: %v",
+				rule.JSONPath, string(rawStatus), err)
+			return nil, err
+		}
+		results[rule.AliasLabelName] = value
+	}
+
+	return results, nil
+}
+
+// parseJSONValue evaluates a single JSONPath expression against the raw status
+// and returns the rendered result as a string.
+func parseJSONValue(rawStatus []byte, jsonPath string) (string, error) {
+	j := jsonpath.New(jsonPath)
+	j.AllowMissingKeys(false)
+	if err := j.Parse(jsonPath); err != nil {
+		return "", err
+	}
+
+	buf := new(bytes.Buffer)
+	unmarshalled := make(map[string]interface{})
+	if err := json.Unmarshal(rawStatus, &unmarshalled); err != nil {
+		return "", err
+	}
+	err := j.Execute(buf, unmarshalled)
+	if err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+// findTargetStatusItemByCluster returns the aggregated status item reported by the given cluster, if any.
+func findTargetStatusItemByCluster(aggregatedStatusItems []workv1alpha2.AggregatedStatusItem, cluster string) (workv1alpha2.AggregatedStatusItem, bool) {
+	if len(aggregatedStatusItems) == 0 {
+		return workv1alpha2.AggregatedStatusItem{}, false
+	}
+
+	for index, statusItem := range aggregatedStatusItems {
+		if statusItem.ClusterName == cluster {
+			return aggregatedStatusItems[index], true
+		}
+	}
+
+	return workv1alpha2.AggregatedStatusItem{}, false
+}
+
+// getClusterNamesFromTargetClusters extracts the cluster names from the scheduling result.
+func getClusterNamesFromTargetClusters(targetClusters []workv1alpha2.TargetCluster) []string {
+	if targetClusters == nil {
+		return nil
+	}
+
+	clusters := make([]string, 0, len(targetClusters))
+	for _, targetCluster := range targetClusters {
+		clusters = append(clusters, targetCluster.Name)
+	}
+	return clusters
+}
+
+// buildTaskOptions assembles the graceful eviction task options for the given
+// cluster according to the declared application failover behavior.
+func buildTaskOptions(failoverBehavior *policyv1alpha1.ApplicationFailoverBehavior, aggregatedStatus []workv1alpha2.AggregatedStatusItem, cluster, producer string, clustersBeforeFailover []string) ([]workv1alpha2.Option, error) {
+	var taskOpts []workv1alpha2.Option
+	taskOpts = append(taskOpts, workv1alpha2.WithProducer(producer))
+	taskOpts = append(taskOpts, workv1alpha2.WithReason(workv1alpha2.EvictionReasonApplicationFailure))
+	taskOpts = append(taskOpts, workv1alpha2.WithPurgeMode(failoverBehavior.PurgeMode))
+
+	if failoverBehavior.StatePreservation != nil && len(failoverBehavior.StatePreservation.Rules) != 0 {
+		targetStatusItem, exist := findTargetStatusItemByCluster(aggregatedStatus, cluster)
+		if !exist || targetStatusItem.Status == nil || targetStatusItem.Status.Raw == nil {
+			return nil, fmt.Errorf("the binding status under Cluster(%s) has not been collected yet", cluster)
+		}
+		preservedLabelState, err := buildPreservedLabelState(failoverBehavior.StatePreservation, targetStatusItem.Status.Raw)
+		if err != nil {
+			return nil, err
+		}
+		if preservedLabelState != nil {
+			taskOpts = append(taskOpts, workv1alpha2.WithPreservedLabelState(preservedLabelState))
+			taskOpts = append(taskOpts, workv1alpha2.WithClustersBeforeFailover(clustersBeforeFailover))
+		}
+	}
+
+	switch failoverBehavior.PurgeMode {
+	case policyv1alpha1.Graciously:
+		if features.FeatureGate.Enabled(features.GracefulEviction) {
+			taskOpts = append(taskOpts, workv1alpha2.WithGracePeriodSeconds(failoverBehavior.GracePeriodSeconds))
+		} else {
+			err := fmt.Errorf("GracefulEviction featureGate must be enabled when purgeMode is %s", policyv1alpha1.Graciously)
+			klog.Error(err)
+			return nil, err
+		}
+	case policyv1alpha1.Never:
+		if features.FeatureGate.Enabled(features.GracefulEviction) {
+			taskOpts = append(taskOpts, workv1alpha2.WithSuppressDeletion(ptr.To[bool](true)))
+		} else {
+			err := fmt.Errorf("GracefulEviction featureGate must be enabled when purgeMode is %s", policyv1alpha1.Never)
+			klog.Error(err)
+			return nil, err
+		}
+	}
+
+	return taskOpts, nil
+}
diff --git a/pkg/controllers/applicationfailover/common_test.go b/pkg/controllers/applicationfailover/common_test.go
index a031e75674c2..c8e88ef9acb8 100644
--- a/pkg/controllers/applicationfailover/common_test.go
+++ b/pkg/controllers/applicationfailover/common_test.go
@@ -17,13 +17,17 @@ limitations under the License.
 
 package applicationfailover
 
 import (
+	"fmt"
 	"reflect"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/utils/ptr"
 
+	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 )
@@ -180,3 +184,404 @@ func TestDistinguishUnhealthyClustersWithOthers(t *testing.T) {
 		})
 	}
 }
+
+func Test_parseJSONValue(t *testing.T) {
+	type args struct {
+		rawStatus []byte
+		jsonPath  string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    string
+		wantErr assert.ErrorAssertionFunc
+	}{
+		{
+			name: "target field not found",
+			args: args{
+				rawStatus: []byte(`{"readyReplicas": 2}`),
+				jsonPath:  "{ .replicas }",
+			},
+			wantErr: assert.Error,
+		},
+		{
+			name: "invalid jsonPath",
+			args: args{
+				rawStatus: []byte(`{"readyReplicas": 2}`),
+				jsonPath:  "{ %replicas }",
+			},
+			wantErr: assert.Error,
+		},
+		{
+			name: "successful parse",
+			args: args{
+				rawStatus: []byte(`{"replicas": 2}`),
+				jsonPath:  "{ .replicas }",
+			},
+			wantErr: assert.NoError,
+			want:    "2",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := parseJSONValue(tt.args.rawStatus, tt.args.jsonPath)
+			if !tt.wantErr(t, err, fmt.Sprintf("parseJSONValue(%v, %v)", tt.args.rawStatus, tt.args.jsonPath)) {
+				return
+			}
+			assert.Equalf(t, tt.want, got, "parseJSONValue(%v, %v)", tt.args.rawStatus, tt.args.jsonPath)
+		})
+	}
+}
+
+func Test_getClusterNamesFromTargetClusters(t *testing.T) {
+	type args struct {
+		targetClusters []workv1alpha2.TargetCluster
+	}
+	tests := []struct {
+		name string
+		args args
+		want []string
+	}{
+		{
+			name: "nil targetClusters",
+			args: args{
+				targetClusters: nil,
+			},
+			want: nil,
+		},
+		{
+			name: "normal case",
+			args: args{
+				targetClusters: []workv1alpha2.TargetCluster{
+					{Name: "c1", Replicas: 1},
+					{Name: "c2", Replicas: 2},
+				},
+			},
+			want: []string{"c1", "c2"},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equalf(t, tt.want, getClusterNamesFromTargetClusters(tt.args.targetClusters), "getClusterNamesFromTargetClusters(%v)", tt.args.targetClusters)
+		})
+	}
+}
+
+func Test_findTargetStatusItemByCluster(t *testing.T) {
+	type args struct {
+		aggregatedStatusItems []workv1alpha2.AggregatedStatusItem
+		cluster               string
+	}
+	tests := []struct {
+		name      string
+		args      args
+		want      workv1alpha2.AggregatedStatusItem
+		wantExist bool
+	}{
+		{
+			name: "nil aggregatedStatusItems",
+			args: args{
+				aggregatedStatusItems: nil,
+				cluster:               "c1",
+			},
+			want:      workv1alpha2.AggregatedStatusItem{},
+			wantExist: false,
+		},
+		{
+			name: "cluster exists in the aggregatedStatusItems",
+			args: args{
+				aggregatedStatusItems: []workv1alpha2.AggregatedStatusItem{
+					{ClusterName: "c1"},
+					{ClusterName: "c2"},
+				},
+				cluster: "c1",
+			},
+			want:      workv1alpha2.AggregatedStatusItem{ClusterName: "c1"},
+			wantExist: true,
+		},
+		{
+			name: "cluster does not exist in the aggregatedStatusItems",
+			args: args{
+				aggregatedStatusItems: []workv1alpha2.AggregatedStatusItem{
+					{ClusterName: "c1"},
+					{ClusterName: "c2"},
+				},
+				cluster: "c?",
+			},
+			want:      workv1alpha2.AggregatedStatusItem{},
+			wantExist: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, got1 := findTargetStatusItemByCluster(tt.args.aggregatedStatusItems, tt.args.cluster)
+			assert.Equalf(t, tt.want, got, "findTargetStatusItemByCluster(%v, %v)", tt.args.aggregatedStatusItems, tt.args.cluster)
+			assert.Equalf(t, tt.wantExist, got1, "findTargetStatusItemByCluster(%v, %v)", tt.args.aggregatedStatusItems, tt.args.cluster)
+		})
+	}
+}
+
+func Test_buildPreservedLabelState(t *testing.T) {
+	type args struct {
+		statePreservation *policyv1alpha1.StatePreservation
+		rawStatus         []byte
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    map[string]string
+		wantErr assert.ErrorAssertionFunc
+	}{
+		{
+			name: "successful case",
+			args: args{
+				statePreservation: &policyv1alpha1.StatePreservation{
+					Rules: []policyv1alpha1.StatePreservationRule{
+						{AliasLabelName: "key-a", JSONPath: "{ .replicas }"},
+						{AliasLabelName: "key-b", JSONPath: "{ .health }"},
+					},
+				},
+				rawStatus: []byte(`{"replicas": 2, "health": true}`),
+			},
+			wantErr: assert.NoError,
+			want:    map[string]string{"key-a": "2", "key-b": "true"},
+		},
+		{
+			name: "one statePreservation rule references a missing field",
+			args: args{
+				statePreservation: &policyv1alpha1.StatePreservation{
+					Rules: []policyv1alpha1.StatePreservationRule{
+						{AliasLabelName: "key-a", JSONPath: "{ .replicas }"},
+						{AliasLabelName: "key-b", JSONPath: "{ .notfound }"},
+					},
+				},
+				rawStatus: []byte(`{"replicas": 2, "health": true}`),
+			},
+			wantErr: assert.Error,
+			want:    nil,
+		},
+		{
+			name: "one statePreservation rule has an invalid jsonPath",
+			args: args{
+				statePreservation: &policyv1alpha1.StatePreservation{
+					Rules: []policyv1alpha1.StatePreservationRule{
+						{AliasLabelName: "key-a", JSONPath: "{ .replicas }"},
+						{AliasLabelName: "key-b", JSONPath: "{ %health }"},
+					},
+				},
+				rawStatus: []byte(`{"replicas": 2, "health": true}`),
+			},
+			wantErr: assert.Error,
+			want:    nil,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := buildPreservedLabelState(tt.args.statePreservation, tt.args.rawStatus)
+			if !tt.wantErr(t, err, fmt.Sprintf("buildPreservedLabelState(%v, %v)", tt.args.statePreservation, tt.args.rawStatus)) {
+				return
+			}
+			assert.Equalf(t, tt.want, got, "buildPreservedLabelState(%v, %v)", tt.args.statePreservation, tt.args.rawStatus)
+		})
+	}
+}
+
+func Test_buildTaskOptions(t *testing.T) {
+	type args struct {
+		failoverBehavior       *policyv1alpha1.ApplicationFailoverBehavior
+		aggregatedStatus       []workv1alpha2.AggregatedStatusItem
+		cluster                string
+		producer               string
+		clustersBeforeFailover []string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    workv1alpha2.TaskOptions
+		wantErr assert.ErrorAssertionFunc
+	}{
+		{
+			name: "Graciously purgeMode with ResourceBinding",
+			args: args{
+				failoverBehavior: &policyv1alpha1.ApplicationFailoverBehavior{
+					DecisionConditions: policyv1alpha1.DecisionConditions{
+						TolerationSeconds: ptr.To[int32](100),
+					},
+					PurgeMode:          policyv1alpha1.Graciously,
+					GracePeriodSeconds: ptr.To[int32](120),
+					StatePreservation: &policyv1alpha1.StatePreservation{
+						Rules: []policyv1alpha1.StatePreservationRule{
+							{AliasLabelName: "key-a", JSONPath: "{ .replicas }"},
+							{AliasLabelName: "key-b", JSONPath: "{ .health }"},
+						},
+					},
+				},
+				aggregatedStatus: []workv1alpha2.AggregatedStatusItem{
+					{ClusterName: "c1", Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 2, "health": true}`)}},
+					{ClusterName: "c2", Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 3, "health": false}`)}},
+				},
+				cluster:                "c1",
+				producer:               RBApplicationFailoverControllerName,
+				clustersBeforeFailover: []string{"c0"},
+			},
+			want: *workv1alpha2.NewTaskOptions(
+				workv1alpha2.WithPurgeMode(policyv1alpha1.Graciously),
+				workv1alpha2.WithProducer(RBApplicationFailoverControllerName),
+				workv1alpha2.WithReason(workv1alpha2.EvictionReasonApplicationFailure),
+				workv1alpha2.WithGracePeriodSeconds(ptr.To[int32](120)),
+				workv1alpha2.WithPreservedLabelState(map[string]string{"key-a": "2", "key-b": "true"}),
+				workv1alpha2.WithClustersBeforeFailover([]string{"c0"})),
+			wantErr: assert.NoError,
+		},
+		{
+			name: "Never purgeMode with ClusterResourceBinding",
+			args: args{
+				failoverBehavior: &policyv1alpha1.ApplicationFailoverBehavior{
+					DecisionConditions: policyv1alpha1.DecisionConditions{
+						TolerationSeconds: ptr.To[int32](100),
+					},
+					PurgeMode: policyv1alpha1.Never,
+					StatePreservation: &policyv1alpha1.StatePreservation{
+						Rules: []policyv1alpha1.StatePreservationRule{
+							{AliasLabelName: "key-a", JSONPath: "{ .replicas }"},
+							{AliasLabelName: "key-b", JSONPath: "{ .health }"},
+						},
+					},
+				},
+				aggregatedStatus: []workv1alpha2.AggregatedStatusItem{
+					{ClusterName: "c1", Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 2, "health": true}`)}},
+					{ClusterName: "c2", Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 3, "health": false}`)}},
+				},
+				cluster:                "c1",
+				producer:               CRBApplicationFailoverControllerName,
+				clustersBeforeFailover: []string{"c0"},
+			},
+			want: *workv1alpha2.NewTaskOptions(
+				workv1alpha2.WithPurgeMode(policyv1alpha1.Never),
+				workv1alpha2.WithProducer(CRBApplicationFailoverControllerName),
+				workv1alpha2.WithReason(workv1alpha2.EvictionReasonApplicationFailure),
+				workv1alpha2.WithPreservedLabelState(map[string]string{"key-a": "2", "key-b": "true"}),
+				workv1alpha2.WithSuppressDeletion(ptr.To[bool](true)),
+				workv1alpha2.WithClustersBeforeFailover([]string{"c0"})),
+			wantErr: assert.NoError,
+		},
+		{
+			name: "Immediately purgeMode with ClusterResourceBinding",
+			args: args{
+				failoverBehavior: &policyv1alpha1.ApplicationFailoverBehavior{
+					DecisionConditions: policyv1alpha1.DecisionConditions{
+						TolerationSeconds: ptr.To[int32](100),
+					},
+					PurgeMode: policyv1alpha1.Immediately,
+					StatePreservation: &policyv1alpha1.StatePreservation{
+						Rules: []policyv1alpha1.StatePreservationRule{
+							{AliasLabelName: "key-a", JSONPath: "{ .replicas }"},
+							{AliasLabelName: "key-b", JSONPath: "{ .health }"},
+						},
+					},
+				},
+				aggregatedStatus: []workv1alpha2.AggregatedStatusItem{
+					{ClusterName: "c1", Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 2, "health": true}`)}},
[]byte(`{"replicas": 2, "health": true}`)}}, + {ClusterName: "c2", Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 3, "health": false}`)}}, + }, + cluster: "c1", + producer: CRBApplicationFailoverControllerName, + clustersBeforeFailover: []string{"c0"}, + }, + want: *workv1alpha2.NewTaskOptions( + workv1alpha2.WithPurgeMode(policyv1alpha1.Immediately), + workv1alpha2.WithProducer(CRBApplicationFailoverControllerName), + workv1alpha2.WithReason(workv1alpha2.EvictionReasonApplicationFailure), + workv1alpha2.WithPreservedLabelState(map[string]string{"key-a": "2", "key-b": "true"}), + workv1alpha2.WithClustersBeforeFailover([]string{"c0"})), + wantErr: assert.NoError, + }, + { + name: "Graciously purgeMode with ResourceBinding, StatePreservation is nil", + args: args{ + failoverBehavior: &policyv1alpha1.ApplicationFailoverBehavior{ + DecisionConditions: policyv1alpha1.DecisionConditions{ + TolerationSeconds: ptr.To[int32](100), + }, + PurgeMode: policyv1alpha1.Graciously, + GracePeriodSeconds: ptr.To[int32](120), + }, + aggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + {ClusterName: "c1", Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 2, "health": true}`)}}, + {ClusterName: "c2", Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 3, "health": false}`)}}, + }, + cluster: "c1", + producer: RBApplicationFailoverControllerName, + clustersBeforeFailover: []string{"c0"}, + }, + want: *workv1alpha2.NewTaskOptions( + workv1alpha2.WithPurgeMode(policyv1alpha1.Graciously), + workv1alpha2.WithProducer(RBApplicationFailoverControllerName), + workv1alpha2.WithReason(workv1alpha2.EvictionReasonApplicationFailure), + workv1alpha2.WithGracePeriodSeconds(ptr.To[int32](120))), + wantErr: assert.NoError, + }, + { + name: "Graciously purgeMode with ResourceBinding, StatePreservation.Rules is nil", + args: args{ + failoverBehavior: &policyv1alpha1.ApplicationFailoverBehavior{ + DecisionConditions: policyv1alpha1.DecisionConditions{ + TolerationSeconds: ptr.To[int32](100), + }, + PurgeMode: policyv1alpha1.Graciously, + GracePeriodSeconds: ptr.To[int32](120), + StatePreservation: &policyv1alpha1.StatePreservation{}, + }, + aggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + {ClusterName: "c1", Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 2, "health": true}`)}}, + {ClusterName: "c2", Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 3, "health": false}`)}}, + }, + cluster: "c1", + producer: RBApplicationFailoverControllerName, + clustersBeforeFailover: []string{"c0"}, + }, + want: *workv1alpha2.NewTaskOptions( + workv1alpha2.WithPurgeMode(policyv1alpha1.Graciously), + workv1alpha2.WithProducer(RBApplicationFailoverControllerName), + workv1alpha2.WithReason(workv1alpha2.EvictionReasonApplicationFailure), + workv1alpha2.WithGracePeriodSeconds(ptr.To[int32](120))), + wantErr: assert.NoError, + }, + { + name: "Graciously purgeMode with ResourceBinding, target cluster status in not collected", + args: args{ + failoverBehavior: &policyv1alpha1.ApplicationFailoverBehavior{ + DecisionConditions: policyv1alpha1.DecisionConditions{ + TolerationSeconds: ptr.To[int32](100), + }, + PurgeMode: policyv1alpha1.Graciously, + GracePeriodSeconds: ptr.To[int32](120), + StatePreservation: &policyv1alpha1.StatePreservation{ + Rules: []policyv1alpha1.StatePreservationRule{ + {AliasLabelName: "key-a", JSONPath: "{ .replicas }"}, + {AliasLabelName: "key-b", JSONPath: "{ .health }"}, + }, + }, + }, + aggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + {ClusterName: "c1"}, + {ClusterName: 
"c2", Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 3, "health": false}`)}}, + }, + cluster: "c1", + producer: RBApplicationFailoverControllerName, + clustersBeforeFailover: []string{"c0"}, + }, + wantErr: assert.Error, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := buildTaskOptions(tt.args.failoverBehavior, tt.args.aggregatedStatus, tt.args.cluster, tt.args.producer, tt.args.clustersBeforeFailover) + if !tt.wantErr(t, err, fmt.Sprintf("buildTaskOptions(%v, %v, %v, %v, %v)", tt.args.failoverBehavior, tt.args.aggregatedStatus, tt.args.cluster, tt.args.producer, tt.args.clustersBeforeFailover)) { + return + } + gotTaskOptions := workv1alpha2.NewTaskOptions(got...) + assert.Equalf(t, tt.want, *gotTaskOptions, "buildTaskOptions(%v, %v, %v, %v, %v)", tt.args.failoverBehavior, tt.args.aggregatedStatus, tt.args.cluster, tt.args.producer, tt.args.clustersBeforeFailover) + }) + } +} diff --git a/pkg/controllers/applicationfailover/crb_application_failover_controller.go b/pkg/controllers/applicationfailover/crb_application_failover_controller.go index 809d08bfebc4..f57714449eec 100644 --- a/pkg/controllers/applicationfailover/crb_application_failover_controller.go +++ b/pkg/controllers/applicationfailover/crb_application_failover_controller.go @@ -18,7 +18,6 @@ package applicationfailover import ( "context" - "fmt" "math" "time" @@ -28,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - "k8s.io/utils/ptr" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -37,7 +35,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" - policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" "github.com/karmada-io/karmada/pkg/features" "github.com/karmada-io/karmada/pkg/resourceinterpreter" @@ -153,40 +150,15 @@ func (c *CRBApplicationFailoverController) syncBinding(ctx context.Context, bind } func (c *CRBApplicationFailoverController) evictBinding(binding *workv1alpha2.ClusterResourceBinding, clusters []string) error { + clustersBeforeFailover := getClusterNamesFromTargetClusters(binding.Spec.Clusters) for _, cluster := range clusters { - switch binding.Spec.Failover.Application.PurgeMode { - case policyv1alpha1.Graciously: - if features.FeatureGate.Enabled(features.GracefulEviction) { - binding.Spec.GracefulEvictCluster(cluster, workv1alpha2.NewTaskOptions( - workv1alpha2.WithPurgeMode(policyv1alpha1.Graciously), - workv1alpha2.WithProducer(CRBApplicationFailoverControllerName), - workv1alpha2.WithReason(workv1alpha2.EvictionReasonApplicationFailure), - workv1alpha2.WithGracePeriodSeconds(binding.Spec.Failover.Application.GracePeriodSeconds))) - } else { - err := fmt.Errorf("GracefulEviction featureGate must be enabled when purgeMode is %s", policyv1alpha1.Graciously) - klog.Error(err) - return err - } - case policyv1alpha1.Never: - if features.FeatureGate.Enabled(features.GracefulEviction) { - binding.Spec.GracefulEvictCluster(cluster, workv1alpha2.NewTaskOptions( - workv1alpha2.WithPurgeMode(policyv1alpha1.Never), - workv1alpha2.WithProducer(CRBApplicationFailoverControllerName), - workv1alpha2.WithReason(workv1alpha2.EvictionReasonApplicationFailure), - workv1alpha2.WithSuppressDeletion(ptr.To[bool](true)))) - } else { - err := 
fmt.Errorf("GracefulEviction featureGate must be enabled when purgeMode is %s", policyv1alpha1.Never) - klog.Error(err) - return err - } - case policyv1alpha1.Immediately: - binding.Spec.GracefulEvictCluster(cluster, workv1alpha2.NewTaskOptions( - workv1alpha2.WithPurgeMode(policyv1alpha1.Immediately), - workv1alpha2.WithProducer(CRBApplicationFailoverControllerName), - workv1alpha2.WithReason(workv1alpha2.EvictionReasonApplicationFailure))) + taskOpts, err := buildTaskOptions(binding.Spec.Failover.Application, binding.Status.AggregatedStatus, cluster, CRBApplicationFailoverControllerName, clustersBeforeFailover) + if err != nil { + klog.Errorf("failed to build TaskOptions for ResourceBinding(%s/%s) under Cluster(%s): %v", binding.Namespace, binding.Name, cluster, err) + return err } + binding.Spec.GracefulEvictCluster(cluster, workv1alpha2.NewTaskOptions(taskOpts...)) } - return nil } diff --git a/pkg/controllers/applicationfailover/rb_application_failover_controller.go b/pkg/controllers/applicationfailover/rb_application_failover_controller.go index c0c20d94a885..572d69f24795 100644 --- a/pkg/controllers/applicationfailover/rb_application_failover_controller.go +++ b/pkg/controllers/applicationfailover/rb_application_failover_controller.go @@ -18,7 +18,6 @@ package applicationfailover import ( "context" - "fmt" "math" "time" @@ -28,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - "k8s.io/utils/ptr" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -37,7 +35,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" - policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" "github.com/karmada-io/karmada/pkg/features" "github.com/karmada-io/karmada/pkg/resourceinterpreter" @@ -153,40 +150,15 @@ func (c *RBApplicationFailoverController) syncBinding(ctx context.Context, bindi } func (c *RBApplicationFailoverController) evictBinding(binding *workv1alpha2.ResourceBinding, clusters []string) error { + clustersBeforeFailover := getClusterNamesFromTargetClusters(binding.Spec.Clusters) for _, cluster := range clusters { - switch binding.Spec.Failover.Application.PurgeMode { - case policyv1alpha1.Graciously: - if features.FeatureGate.Enabled(features.GracefulEviction) { - binding.Spec.GracefulEvictCluster(cluster, workv1alpha2.NewTaskOptions( - workv1alpha2.WithPurgeMode(policyv1alpha1.Graciously), - workv1alpha2.WithProducer(RBApplicationFailoverControllerName), - workv1alpha2.WithReason(workv1alpha2.EvictionReasonApplicationFailure), - workv1alpha2.WithGracePeriodSeconds(binding.Spec.Failover.Application.GracePeriodSeconds))) - } else { - err := fmt.Errorf("GracefulEviction featureGate must be enabled when purgeMode is %s", policyv1alpha1.Graciously) - klog.Error(err) - return err - } - case policyv1alpha1.Never: - if features.FeatureGate.Enabled(features.GracefulEviction) { - binding.Spec.GracefulEvictCluster(cluster, workv1alpha2.NewTaskOptions( - workv1alpha2.WithPurgeMode(policyv1alpha1.Never), - workv1alpha2.WithProducer(RBApplicationFailoverControllerName), - workv1alpha2.WithReason(workv1alpha2.EvictionReasonApplicationFailure), - workv1alpha2.WithSuppressDeletion(ptr.To[bool](true)))) - } else { - err := fmt.Errorf("GracefulEviction featureGate must be enabled when purgeMode is %s", 
-				klog.Error(err)
-				return err
-			}
-		case policyv1alpha1.Immediately:
-			binding.Spec.GracefulEvictCluster(cluster, workv1alpha2.NewTaskOptions(
-				workv1alpha2.WithPurgeMode(policyv1alpha1.Immediately),
-				workv1alpha2.WithProducer(RBApplicationFailoverControllerName),
-				workv1alpha2.WithReason(workv1alpha2.EvictionReasonApplicationFailure)))
+		taskOpts, err := buildTaskOptions(binding.Spec.Failover.Application, binding.Status.AggregatedStatus, cluster, RBApplicationFailoverControllerName, clustersBeforeFailover)
+		if err != nil {
+			klog.Errorf("Failed to build TaskOptions for ResourceBinding(%s/%s) under Cluster(%s): %v", binding.Namespace, binding.Name, cluster, err)
+			return err
 		}
+		binding.Spec.GracefulEvictCluster(cluster, workv1alpha2.NewTaskOptions(taskOpts...))
 	}
-
 	return nil
 }
diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go
index 0ae799c5d472..cf11b4932051 100644
--- a/pkg/generated/openapi/zz_generated.openapi.go
+++ b/pkg/generated/openapi/zz_generated.openapi.go
@@ -6980,6 +6980,21 @@ func schema_pkg_apis_work_v1alpha2_GracefulEvictionTask(ref common.ReferenceCall
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 						},
 					},
+					"clusterBeforeFailover": {
+						SchemaProps: spec.SchemaProps{
+							Description: "ClusterBeforeFailover records the clusters where the application was running before failover.",
+							Type:        []string{"array"},
+							Items: &spec.SchemaOrArray{
+								Schema: &spec.Schema{
+									SchemaProps: spec.SchemaProps{
+										Default: "",
+										Type:    []string{"string"},
+										Format:  "",
+									},
+								},
+							},
+						},
+					},
 				},
 				Required: []string{"fromCluster", "reason", "producer"},
 			},
@@ -7517,8 +7532,37 @@ func schema_pkg_apis_work_v1alpha2_TaskOptions(ref common.ReferenceCallback) com
 							Format: "",
 						},
 					},
+					"preservedLabelState": {
+						SchemaProps: spec.SchemaProps{
+							Type: []string{"object"},
+							AdditionalProperties: &spec.SchemaOrBool{
+								Allows: true,
+								Schema: &spec.Schema{
+									SchemaProps: spec.SchemaProps{
+										Default: "",
+										Type:    []string{"string"},
+										Format:  "",
+									},
+								},
+							},
+						},
+					},
+					"clustersBeforeFailover": {
+						SchemaProps: spec.SchemaProps{
+							Type: []string{"array"},
+							Items: &spec.SchemaOrArray{
+								Schema: &spec.Schema{
+									SchemaProps: spec.SchemaProps{
+										Default: "",
+										Type:    []string{"string"},
+										Format:  "",
+									},
+								},
+							},
+						},
+					},
 				},
-				Required: []string{"purgeMode", "producer", "reason", "message", "gracePeriodSeconds", "suppressDeletion"},
+				Required: []string{"purgeMode", "producer", "reason", "message", "gracePeriodSeconds", "suppressDeletion", "preservedLabelState", "clustersBeforeFailover"},
 			},
 		},
 	}
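Reviewer note: below is a minimal, self-contained sketch (not part of the patch) of the state-preservation mechanism this PR introduces. It mirrors the parseJSONValue/buildPreservedLabelState helpers: each StatePreservation rule's JSONPath expression is evaluated against the raw status collected from the failed cluster, and the rendered values become the preserved label state carried on the graceful eviction task. The "rule" type, the "preserveState" function name, and the status payload are invented for illustration; only the k8s.io/client-go/util/jsonpath calls match what the patch uses.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"k8s.io/client-go/util/jsonpath"
)

// rule pairs an alias label name with a JSONPath expression, shaped like
// policyv1alpha1.StatePreservationRule in the patch (names are hypothetical).
type rule struct {
	aliasLabelName string
	jsonPath       string
}

// preserveState evaluates each rule against a raw status document and returns
// the preserved values keyed by alias label name.
func preserveState(rawStatus []byte, rules []rule) (map[string]string, error) {
	var status map[string]interface{}
	if err := json.Unmarshal(rawStatus, &status); err != nil {
		return nil, err
	}
	results := make(map[string]string, len(rules))
	for _, r := range rules {
		j := jsonpath.New(r.aliasLabelName)
		// A missing field fails the whole build, as in the patch.
		j.AllowMissingKeys(false)
		if err := j.Parse(r.jsonPath); err != nil {
			return nil, err
		}
		buf := new(bytes.Buffer)
		if err := j.Execute(buf, status); err != nil {
			return nil, err
		}
		results[r.aliasLabelName] = buf.String()
	}
	return results, nil
}

func main() {
	// Invented status payload; same shape as the unit tests above.
	raw := []byte(`{"replicas": 2, "health": true}`)
	labels, err := preserveState(raw, []rule{
		{aliasLabelName: "key-a", jsonPath: "{ .replicas }"},
		{aliasLabelName: "key-b", jsonPath: "{ .health }"},
	})
	fmt.Println(labels, err) // map[key-a:2 key-b:true] <nil>
}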