diff --git a/api/types/runner_group.go b/api/types/runner_group.go index c47f105..3ba5abf 100644 --- a/api/types/runner_group.go +++ b/api/types/runner_group.go @@ -19,4 +19,8 @@ type RunnerGroupSpec struct { NodeAffinity map[string][]string `json:"nodeAffinity,omitempty" yaml:"nodeAffinity"` // ServiceAccount is the name of the ServiceAccount to use to run runners. ServiceAccount *string `json:"serviceAccount,omitempty" yaml:"serviceAccount"` + // OwnerReference is to mark the runner group depending on this object. + // + // FORMAT: APIVersion:Kind:Name:UID + OwnerReference *string `json:"ownerReference,omitempty" yaml:"ownerReference"` } diff --git a/cmd/kperf/commands/multirunners/server.go b/cmd/kperf/commands/multirunners/server.go index a18963d..58a15aa 100644 --- a/cmd/kperf/commands/multirunners/server.go +++ b/cmd/kperf/commands/multirunners/server.go @@ -30,6 +30,10 @@ var serverCommand = cli.Command{ Usage: "The runner's conainer image", Required: true, }, + cli.StringFlag{ + Name: "runner-owner", + Usage: "The runners depend on this object (FORMAT: APIVersion:Kind:Name:UID)", + }, cli.StringSliceFlag{ Name: "address", Usage: "Address for the server", @@ -79,6 +83,11 @@ func buildRunnerGroupHandlers(cliCtx *cli.Context, serverName string) ([]*runner imgRef := cliCtx.String("runner-image") namespace := cliCtx.String("namespace") + ownerRef := "" + if cliCtx.IsSet("runner-owner") { + ownerRef = cliCtx.String("runner-owner") + } + groups := make([]*runnergroup.Handler, 0, len(specURIs)) for idx, specURI := range specURIs { spec, err := runnergroup.NewRunnerGroupSpecFromURI(clientset, specURI) @@ -86,6 +95,10 @@ func buildRunnerGroupHandlers(cliCtx *cli.Context, serverName string) ([]*runner return nil, err } + if ownerRef != "" { + spec.OwnerReference = &ownerRef + } + groupName := fmt.Sprintf("%s-%d", serverName, idx) g, err := runnergroup.NewHandler(clientset, namespace, groupName, spec, imgRef) if err != nil { diff --git a/runner/group/handler.go 
b/runner/group/handler.go index 71fa2dd..133b496 100644 --- a/runner/group/handler.go +++ b/runner/group/handler.go @@ -16,6 +16,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" + apitypes "k8s.io/apimachinery/pkg/types" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" @@ -34,7 +35,8 @@ type Handler struct { name string namespace string - spec *types.RunnerGroupSpec + spec *types.RunnerGroupSpec + ownerRef *metav1.OwnerReference // FIXME(weifu): should we migrate this field into RunnerGroupSpec? imageRef string @@ -49,10 +51,16 @@ func NewHandler( spec *types.RunnerGroupSpec, imageRef string, ) (*Handler, error) { + ownRef, err := buildOwnerReference(spec.OwnerReference) + if err != nil { + return nil, err + } + return &Handler{ name: name, namespace: namespace, spec: spec, + ownerRef: ownRef, imageRef: imageRef, clientset: clientset, }, nil @@ -108,6 +116,9 @@ func (h *Handler) uploadLoadProfileAsConfigMap(ctx context.Context) error { configMapDataKeyLoadProfile: string(raw), }, } + if h.ownerRef != nil { + cm.OwnerReferences = append(cm.OwnerReferences, *h.ownerRef) + } _, err = cli.Create(ctx, cm, metav1.CreateOptions{}) return err } @@ -322,6 +333,10 @@ func (h *Handler) buildBatchJobObject(uploadURL string) *batchv1.Job { }, } + if h.ownerRef != nil { + job.OwnerReferences = append(job.OwnerReferences, *h.ownerRef) + } + job.Spec.Template.Spec = corev1.PodSpec{ Affinity: &corev1.Affinity{}, Containers: []corev1.Container{ @@ -424,6 +439,25 @@ func (h *Handler) buildBatchJobObject(uploadURL string) *batchv1.Job { return job } +func buildOwnerReference(ref *string) (*metav1.OwnerReference, error) { + if ref == nil { + return nil, nil + } + + tokens := strings.SplitN(*ref, ":", 4) + if len(tokens) != 4 { + return nil, fmt.Errorf("%s owner reference is not apiVersion:kind:name:uid format", *ref) + } + + return 
&metav1.OwnerReference{ + APIVersion: tokens[0], + Kind: tokens[1], + Name: tokens[2], + UID: apitypes.UID(tokens[3]), + Controller: toPtr(true), + }, nil +} + func jobFinished(job *batchv1.Job) bool { return job.Status.Failed+job.Status.Succeeded == *job.Spec.Completions }