diff --git a/pkg/registration/hub/gc/controller.go b/pkg/registration/hub/gc/controller.go
index 2f5b390a3..756de0f7b 100644
--- a/pkg/registration/hub/gc/controller.go
+++ b/pkg/registration/hub/gc/controller.go
@@ -7,13 +7,14 @@ import (
 	"github.com/openshift/library-go/pkg/controller/factory"
 	"github.com/openshift/library-go/pkg/operator/events"
-	operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
-	"k8s.io/apimachinery/pkg/api/errors"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/client-go/kubernetes"
+	corelisters "k8s.io/client-go/listers/core/v1"
 	rbacv1listers "k8s.io/client-go/listers/rbac/v1"
 	"k8s.io/client-go/metadata"
 	"k8s.io/klog/v2"
@@ -47,21 +48,22 @@ var (
 // gcReconciler is an interface for reconcile cleanup logic after cluster is deleted.
 // clusterName is from the queueKey, cluster may be nil.
 type gcReconciler interface {
-	reconcile(ctx context.Context, cluster *clusterv1.ManagedCluster) (gcReconcileOp, error)
+	reconcile(ctx context.Context, cluster *clusterv1.ManagedCluster, clusterNamespace *corev1.Namespace) (gcReconcileOp, error)
 }
 
 type GCController struct {
-	clusterLister  clusterv1listers.ManagedClusterLister
-	clusterPatcher patcher.Patcher[*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus]
-	gcReconcilers  []gcReconciler
+	clusterLister   clusterv1listers.ManagedClusterLister
+	namespaceLister corelisters.NamespaceLister
+	clusterPatcher  patcher.Patcher[*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus]
+	gcReconcilers   []gcReconciler
 }
 
 // NewGCController ensures the related resources are cleaned up after cluster is deleted
 func NewGCController(
 	clusterRoleLister rbacv1listers.ClusterRoleLister,
-	clusterRoleBindingLister rbacv1listers.ClusterRoleBindingLister,
 	roleBindingLister rbacv1listers.RoleBindingLister,
 	clusterInformer informerv1.ManagedClusterInformer,
+	namespaceLister corelisters.NamespaceLister,
 	manifestWorkLister worklister.ManifestWorkLister,
 	clusterClient clientset.Interface,
 	kubeClient kubernetes.Interface,
@@ -76,9 +78,10 @@ func NewGCController(
 		clusterClient.ClusterV1().ManagedClusters())
 
 	controller := &GCController{
-		clusterLister:  clusterInformer.Lister(),
-		clusterPatcher: clusterPatcher,
-		gcReconcilers:  []gcReconciler{},
+		clusterLister:   clusterInformer.Lister(),
+		namespaceLister: namespaceLister,
+		clusterPatcher:  clusterPatcher,
+		gcReconcilers:   []gcReconciler{},
 	}
 
 	// do not clean resources if featureGate is disabled or no gc resource list for backwards compatible.
@@ -98,9 +101,8 @@
 	}
 
 	controller.gcReconcilers = append(controller.gcReconcilers,
-		newGCClusterRbacController(kubeClient, clusterPatcher, clusterInformer, clusterRoleLister,
-			clusterRoleBindingLister, roleBindingLister, manifestWorkLister, approver, eventRecorder,
-			resourceCleanupFeatureGateEnable))
+		newGCClusterRbacController(kubeClient, clusterPatcher, clusterRoleLister, roleBindingLister,
+			manifestWorkLister, approver, eventRecorder, resourceCleanupFeatureGateEnable))
 
 	return factory.New().
 		WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterInformer.Informer()).
@@ -109,26 +111,43 @@
 // gc controller is watching cluster and to do these jobs:
 // 1. add a cleanup finalizer to managedCluster if the cluster is not deleting.
-// 2. clean up all rbac and resources in the cluster ns after the cluster is deleted.
+// 2. clean up all rolebindings and resources in the cluster ns after the cluster is deleted.
 func (r *GCController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
 	clusterName := controllerContext.QueueKey()
 	if clusterName == "" || clusterName == factory.DefaultQueueKey {
 		return nil
 	}
 
-	originalCluster, err := r.clusterLister.Get(clusterName)
-	switch {
-	case errors.IsNotFound(err):
-		return nil
-	case err != nil:
+	// cluster could be nil, which means the cluster is gone but the gc has not finished.
+	cluster, err := r.clusterLister.Get(clusterName)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return err
+	}
+
+	if cluster != nil && cluster.DeletionTimestamp.IsZero() {
+		_, err := r.clusterPatcher.AddFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer)
 		return err
 	}
-	cluster := originalCluster.DeepCopy()
 
+	// clusterNamespace could be nil, which means there are no resources waiting for cleanup.
+	clusterNamespace, err := r.namespaceLister.Get(clusterName)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return err
+	}
+
+	if cluster == nil && clusterNamespace == nil {
+		return nil
+	}
+
+	var copyCluster *clusterv1.ManagedCluster
+	if cluster != nil {
+		copyCluster = cluster.DeepCopy()
+	}
+
 	var errs []error
 	var requeue bool
 	for _, reconciler := range r.gcReconcilers {
-		op, err := reconciler.reconcile(ctx, cluster)
+		op, err := reconciler.reconcile(ctx, copyCluster, clusterNamespace)
 		if err != nil {
 			errs = append(errs, err)
 		}
@@ -139,10 +158,19 @@
 			break
 		}
 	}
-	// update cluster condition firstly
+
+	if requeue {
+		controllerContext.Queue().AddAfter(clusterName, 1*time.Second)
+	}
+
+	if cluster == nil {
+		return utilerrors.NewAggregate(errs)
+	}
+
+	// update cluster condition
 	if len(errs) != 0 {
-		applyErrors := operatorhelpers.NewMultiLineAggregate(errs)
-		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+		applyErrors := utilerrors.NewAggregate(errs)
+		meta.SetStatusCondition(&copyCluster.Status.Conditions, metav1.Condition{
 			Type:   clusterv1.ManagedClusterConditionDeleting,
 			Status: metav1.ConditionFalse,
 			Reason: clusterv1.ConditionDeletingReasonResourceError,
@@ -150,12 +178,13 @@ func (r *GCController) sync(ctx context.Context, controllerContext factory.SyncC
 		})
 	}
 
-	if _, err = r.clusterPatcher.PatchStatus(ctx, cluster, cluster.Status, originalCluster.Status); err != nil {
+	if _, err = r.clusterPatcher.PatchStatus(ctx, cluster, copyCluster.Status, cluster.Status); err != nil {
 		errs = append(errs, err)
 	}
 
-	if requeue {
-		controllerContext.Queue().AddAfter(clusterName, 1*time.Second)
+	if len(errs) != 0 || requeue {
+		return utilerrors.NewAggregate(errs)
 	}
-	return utilerrors.NewAggregate(errs)
+
+	return r.clusterPatcher.RemoveFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer)
 }
diff --git a/pkg/registration/hub/gc/controller_test.go b/pkg/registration/hub/gc/controller_test.go
index f874fdeac..ea1b8b910 100644
--- a/pkg/registration/hub/gc/controller_test.go
+++ b/pkg/registration/hub/gc/controller_test.go
@@ -7,11 +7,14 @@ import (
 	"github.com/openshift/library-go/pkg/controller/factory"
 	"github.com/openshift/library-go/pkg/operator/events"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/informers" fakeclient "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/scheme" fakemetadataclient "k8s.io/client-go/metadata/fake" + clienttesting "k8s.io/client-go/testing" fakeclusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned/fake" clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions" @@ -27,27 +30,49 @@ import ( func TestGController(t *testing.T) { cases := []struct { - name string - key string - cluster *clusterv1.ManagedCluster - expectedErr string + name string + key string + cluster *clusterv1.ManagedCluster + namesapce *corev1.Namespace + expectedErr string + validateActions func(t *testing.T, clusterActions []clienttesting.Action) }{ { name: "invalid key", key: factory.DefaultQueueKey, cluster: testinghelpers.NewDeletingManagedCluster(), expectedErr: "", + validateActions: func(t *testing.T, clusterActions []clienttesting.Action) { + testingcommon.AssertNoActions(t, clusterActions) + }, }, { - name: "valid key", + name: "valid key no namespace", key: testinghelpers.TestManagedClusterName, cluster: testinghelpers.NewDeletingManagedCluster(), expectedErr: "", + validateActions: func(t *testing.T, clusterActions []clienttesting.Action) { + testingcommon.AssertActions(t, clusterActions, "patch") + }, + }, + { + name: "valid key with cluster and namespace", + key: testinghelpers.TestManagedClusterName, + cluster: testinghelpers.NewDeletingManagedCluster(), + namesapce: newNamespace(testinghelpers.TestManagedClusterName), + expectedErr: "", + validateActions: func(t *testing.T, clusterActions []clienttesting.Action) { + testingcommon.AssertActions(t, clusterActions, "patch", "patch") + }, }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - kubeClient := fakeclient.NewSimpleClientset() + objs := []runtime.Object{} + if c.namesapce != nil { + objs = append(objs, c.namesapce) + } + kubeClient := fakeclient.NewSimpleClientset(objs...) 
 			kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Minute*10)
 
 			metadataClient := fakemetadataclient.NewSimpleMetadataClient(scheme.Scheme)
@@ -66,9 +91,9 @@ func TestGController(t *testing.T) {
 
 			_ = NewGCController(
 				kubeInformerFactory.Rbac().V1().ClusterRoles().Lister(),
-				kubeInformerFactory.Rbac().V1().ClusterRoleBindings().Lister(),
 				kubeInformerFactory.Rbac().V1().RoleBindings().Lister(),
 				clusterInformerFactory.Cluster().V1().ManagedClusters(),
+				kubeInformerFactory.Core().V1().Namespaces().Lister(),
 				workInformerFactory.Work().V1().ManifestWorks().Lister(),
 				clusterClient,
 				kubeClient,
@@ -79,7 +104,12 @@ func TestGController(t *testing.T) {
 					"work.open-cluster-management.io/v1/manifestworks"},
 				true,
 			)
-
+			namespaceStore := kubeInformerFactory.Core().V1().Namespaces().Informer().GetStore()
+			if c.namespace != nil {
+				if err := namespaceStore.Add(c.namespace); err != nil {
+					t.Fatal(err)
+				}
+			}
 			clusterPatcher := patcher.NewPatcher[
 				*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
 				clusterClient.ClusterV1().ManagedClusters())
@@ -91,20 +121,20 @@ func TestGController(t *testing.T) {
 					newGCResourcesController(metadataClient, []schema.GroupVersionResource{addonGvr, workGvr},
 						events.NewInMemoryRecorder("")),
 					newGCClusterRbacController(kubeClient, clusterPatcher,
-						clusterInformerFactory.Cluster().V1().ManagedClusters(),
 						kubeInformerFactory.Rbac().V1().ClusterRoles().Lister(),
-						kubeInformerFactory.Rbac().V1().ClusterRoleBindings().Lister(),
 						kubeInformerFactory.Rbac().V1().RoleBindings().Lister(),
 						workInformerFactory.Work().V1().ManifestWorks().Lister(),
 						register.NewNoopApprover(),
 						events.NewInMemoryRecorder(""),
 						true),
 				},
+				namespaceLister: kubeInformerFactory.Core().V1().Namespaces().Lister(),
 			}
 
 			controllerContext := testingcommon.NewFakeSyncContext(t, c.key)
 			err := ctrl.sync(context.TODO(), controllerContext)
 			testingcommon.AssertError(t, err, c.expectedErr)
+			c.validateActions(t, clusterClient.Actions())
 		})
 	}
 }
diff --git a/pkg/registration/hub/gc/gc_cluster_rbac.go b/pkg/registration/hub/gc/gc_cluster_rbac.go
index b8d39f4e4..fad7e811a 100644
--- a/pkg/registration/hub/gc/gc_cluster_rbac.go
+++ b/pkg/registration/hub/gc/gc_cluster_rbac.go
@@ -7,15 +7,13 @@ import (
 	"github.com/openshift/library-go/pkg/operator/events"
 	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
 	operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
+	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/rbac/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/kubernetes"
 	rbacv1listers "k8s.io/client-go/listers/rbac/v1"
-	"k8s.io/klog/v2"
 
-	informerv1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1"
-	clusterv1listers "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
 	worklister "open-cluster-management.io/api/client/work/listers/work/v1"
 	clusterv1 "open-cluster-management.io/api/cluster/v1"
 	"open-cluster-management.io/sdk-go/pkg/patcher"
@@ -46,9 +44,7 @@ var (
 
 type gcClusterRbacController struct {
 	kubeClient            kubernetes.Interface
-	clusterLister         clusterv1listers.ManagedClusterLister
 	clusterRoleLister     rbacv1listers.ClusterRoleLister
-	clusterRoleBingLister rbacv1listers.ClusterRoleBindingLister
 	roleBindingLister     rbacv1listers.RoleBindingLister
 	manifestWorkLister    worklister.ManifestWorkLister
 	clusterPatcher        patcher.Patcher[*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus]
@@ -61,9 +57,7 @@ type gcClusterRbacController struct {
 func newGCClusterRbacController(
 	kubeClient kubernetes.Interface,
 	clusterPatcher patcher.Patcher[*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus],
-	clusterInformer informerv1.ManagedClusterInformer,
 	clusterRoleLister rbacv1listers.ClusterRoleLister,
-	clusterRoleBindingLister rbacv1listers.ClusterRoleBindingLister,
 	roleBindingLister rbacv1listers.RoleBindingLister,
 	manifestWorkLister worklister.ManifestWorkLister,
 	approver register.Approver,
@@ -73,9 +67,7 @@ func newGCClusterRbacController(
 
 	return &gcClusterRbacController{
 		kubeClient:            kubeClient,
-		clusterLister:         clusterInformer.Lister(),
 		clusterRoleLister:     clusterRoleLister,
-		clusterRoleBingLister: clusterRoleBindingLister,
 		roleBindingLister:     roleBindingLister,
 		manifestWorkLister:    manifestWorkLister,
 		clusterPatcher:        clusterPatcher,
@@ -85,43 +77,42 @@
 	}
 }
 
-func (r *gcClusterRbacController) reconcile(ctx context.Context, cluster *clusterv1.ManagedCluster) (gcReconcileOp, error) {
-	if cluster.DeletionTimestamp.IsZero() {
-		_, err := r.clusterPatcher.AddFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer)
-		return gcReconcileStop, err
+func (r *gcClusterRbacController) reconcile(ctx context.Context,
+	cluster *clusterv1.ManagedCluster, clusterNamespace *corev1.Namespace) (gcReconcileOp, error) {
+	if cluster == nil && clusterNamespace == nil {
+		return gcReconcileStop, nil
 	}
 
-	if err := r.removeClusterRbac(ctx, cluster.Name, cluster.Spec.HubAcceptsClient); err != nil {
-		return gcReconcileContinue, err
-	}
-
-	if err := r.approver.Cleanup(ctx, cluster); err != nil {
-		return gcReconcileContinue, err
-	}
+	if cluster != nil {
+		if err := r.removeClusterRbac(ctx, cluster.Name, cluster.Spec.HubAcceptsClient); err != nil {
+			return gcReconcileContinue, err
+		}
 
-	works, err := r.manifestWorkLister.ManifestWorks(cluster.Name).List(labels.Everything())
-	if err != nil && !errors.IsNotFound(err) {
-		return gcReconcileStop, err
-	}
-	if len(works) != 0 {
-		klog.V(2).Infof("cluster %s is deleting, waiting %d works in the cluster namespace to be deleted.",
-			cluster.Name, len(works))
+		if err := r.approver.Cleanup(ctx, cluster); err != nil {
+			return gcReconcileContinue, err
+		}
 
-		// remove finalizer to delete the cluster for backwards compatible.
+		// if the GC feature gate is disabled, the finalizer is removed from the cluster after the related rbac is deleted.
+		// there is no need to wait for other resources to be cleaned up before removing the finalizer.
 		if !r.resourceCleanupFeatureGateEnable {
-			return gcReconcileStop, r.clusterPatcher.RemoveFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer)
+			if err := r.clusterPatcher.RemoveFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer); err != nil {
+				return gcReconcileStop, err
+			}
 		}
-		return gcReconcileRequeue, nil
 	}
 
-	if err = r.removeFinalizerFromWorkRoleBinding(ctx, cluster.Name, manifestWorkFinalizer); err != nil {
-		return gcReconcileStop, err
+	if clusterNamespace != nil {
+		works, err := r.manifestWorkLister.ManifestWorks(clusterNamespace.Name).List(labels.Everything())
+		if err != nil && !errors.IsNotFound(err) {
+			return gcReconcileStop, err
+		}
+		if len(works) != 0 {
+			return gcReconcileRequeue, nil
+		}
+		return gcReconcileStop, r.removeFinalizerFromWorkRoleBinding(ctx, clusterNamespace.Name, manifestWorkFinalizer)
 	}
 
-	r.eventRecorder.Eventf("ManagedClusterGC",
-		"managed cluster %s is deleting and the cluster rbac are deleted", cluster.Name)
-
-	return gcReconcileContinue, r.clusterPatcher.RemoveFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer)
+	return gcReconcileStop, nil
 }
 
 func (r *gcClusterRbacController) removeClusterRbac(ctx context.Context, clusterName string, accepted bool) error {
diff --git a/pkg/registration/hub/gc/gc_cluster_rbac_test.go b/pkg/registration/hub/gc/gc_cluster_rbac_test.go
index 32da6727e..46f731dce 100644
--- a/pkg/registration/hub/gc/gc_cluster_rbac_test.go
+++ b/pkg/registration/hub/gc/gc_cluster_rbac_test.go
@@ -7,6 +7,8 @@ import (
 	"github.com/openshift/library-go/pkg/operator/events"
 	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	kubeinformers "k8s.io/client-go/informers"
 	fakeclient "k8s.io/client-go/kubernetes/fake"
@@ -27,37 +29,68 @@ import (
 
 func TestGCClusterRbacController(t *testing.T) {
 	cases := []struct {
-		name            string
-		cluster         *clusterv1.ManagedCluster
-		works           []*workv1.ManifestWork
-		workRoleBinding runtime.Object
-		expectedOp      gcReconcileOp
-		validateActions func(t *testing.T, kubeActions, clusterActions []clienttesting.Action)
+		name                             string
+		cluster                          *clusterv1.ManagedCluster
+		namespace                        *corev1.Namespace
+		works                            []*workv1.ManifestWork
+		workRoleBinding                  runtime.Object
+		resourceCleanupFeatureGateEnable bool
+		expectedOp                       gcReconcileOp
+		validateActions                  func(t *testing.T, kubeActions, clusterActions []clienttesting.Action)
 	}{
 		{
-			name:       "add finalizer to the mcl",
-			cluster:    testinghelpers.NewManagedCluster(),
+			name:       "no cluster and ns, do nothing",
 			expectedOp: gcReconcileStop,
 			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
-				testingcommon.AssertActions(t, clusterActions, "patch")
 				testingcommon.AssertNoActions(t, kubeActions)
 			},
 		},
 		{
-			name:    "delete rbac with no works",
-			cluster: testinghelpers.NewDeletingManagedCluster(),
-			workRoleBinding: testinghelpers.NewRoleBinding(testinghelpers.TestManagedClusterName,
-				workRoleBindingName(testinghelpers.TestManagedClusterName), []string{manifestWorkFinalizer},
-				map[string]string{clusterv1.ClusterNameLabelKey: testinghelpers.TestManagedClusterName}, true),
-			expectedOp: gcReconcileContinue,
+			name:      "cluster is nil and has works, requeue",
+			namespace: newNamespace("cluster1"),
+			works: []*workv1.ManifestWork{
+				testinghelpers.NewManifestWork("cluster1", "test", nil, nil, nil, nil),
+			},
+			expectedOp: gcReconcileRequeue,
 			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
-				testingcommon.AssertActions(t, kubeActions, "delete", "delete", "delete", "delete", "patch")
+				testingcommon.AssertNoActions(t, kubeActions)
+			},
+		},
+		{
+			name:       "cluster is nil and no works, remove finalizer from work rolebinding",
+			namespace:  newNamespace("cluster1"),
+			expectedOp: gcReconcileStop,
+			workRoleBinding: testinghelpers.NewRoleBinding("cluster1",
+				workRoleBindingName("cluster1"), []string{manifestWorkFinalizer},
+				map[string]string{clusterv1.ClusterNameLabelKey: "cluster1"}, true),
+			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
+				testingcommon.AssertActions(t, kubeActions, "patch")
+			},
+		},
+		{
+			name:                             "gc is disabled, ns is nil, delete rbac and remove finalizer from cluster",
+			cluster:                          testinghelpers.NewDeletingManagedCluster(),
+			resourceCleanupFeatureGateEnable: false,
+			expectedOp:                       gcReconcileStop,
+			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
+				testingcommon.AssertActions(t, kubeActions, "delete", "delete", "delete", "delete")
+				testingcommon.AssertActions(t, clusterActions, "patch")
+			},
+		},
 		{
-			name:    "delete rbac with works",
-			cluster: testinghelpers.NewDeletingManagedCluster(),
+			name:                             "gc is enabled, ns is nil, delete rbac",
+			cluster:                          testinghelpers.NewDeletingManagedCluster(),
+			resourceCleanupFeatureGateEnable: true,
+			expectedOp:                       gcReconcileStop,
+			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
+				testingcommon.AssertActions(t, kubeActions, "delete", "delete", "delete", "delete")
+			},
+		},
+		{
+			name:                             "gc is disabled with works",
+			cluster:                          testinghelpers.NewDeletingManagedCluster(),
+			namespace:                        newNamespace(testinghelpers.TestManagedClusterName),
+			resourceCleanupFeatureGateEnable: false,
 			works: []*workv1.ManifestWork{
 				testinghelpers.NewManifestWork(testinghelpers.TestManagedClusterName, "test", nil, nil, nil, nil),
 			},
@@ -68,32 +101,51 @@ func TestGCClusterRbacController(t *testing.T) {
 			expectedOp: gcReconcileRequeue,
 			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
 				testingcommon.AssertActions(t, kubeActions, "delete", "delete", "delete", "delete")
-				testingcommon.AssertNoActions(t, clusterActions)
+				testingcommon.AssertActions(t, clusterActions, "patch")
 			},
 		},
 		{
-			name:       "remove finalizer of mcl",
-			cluster:    testinghelpers.NewDeletingManagedCluster(),
-			expectedOp: gcReconcileContinue,
+			name:                             "gc is disabled with no works",
+			cluster:                          testinghelpers.NewDeletingManagedCluster(),
+			namespace:                        newNamespace(testinghelpers.TestManagedClusterName),
+			resourceCleanupFeatureGateEnable: false,
+			workRoleBinding: testinghelpers.NewRoleBinding(testinghelpers.TestManagedClusterName,
+				workRoleBindingName(testinghelpers.TestManagedClusterName), []string{manifestWorkFinalizer},
+				map[string]string{clusterv1.ClusterNameLabelKey: testinghelpers.TestManagedClusterName}, true),
+			expectedOp: gcReconcileStop,
 			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
-				testingcommon.AssertActions(t, kubeActions, "delete", "delete", "delete", "delete")
+				testingcommon.AssertActions(t, kubeActions, "delete", "delete", "delete", "delete", "patch")
 				testingcommon.AssertActions(t, clusterActions, "patch")
 			},
 		},
 	}
 
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
-			mclClusterRole := testinghelpers.NewClusterRole(
-				mclClusterRoleName(c.cluster.Name),
-				[]string{}, map[string]string{clusterv1.ClusterNameLabelKey: ""}, false)
-			mclClusterRoleBinding := testinghelpers.NewClusterRoleBinding(
-				mclClusterRoleBindingName(c.cluster.Name),
-				[]string{}, map[string]string{clusterv1.ClusterNameLabelKey: ""}, false)
-			registrationRoleBinding := testinghelpers.NewRoleBinding(c.cluster.Name,
-				registrationRoleBindingName(c.cluster.Name),
-				[]string{}, map[string]string{clusterv1.ClusterNameLabelKey: ""}, false)
-
-			objs := []runtime.Object{mclClusterRole, mclClusterRoleBinding, registrationRoleBinding}
+			objs := []runtime.Object{}
+			clusterName := ""
+			if c.cluster != nil {
+				clusterName = c.cluster.Name
+			} else if c.namespace != nil {
+				clusterName = c.namespace.Name
+			}
+
+			if clusterName != "" {
+				mclClusterRole := testinghelpers.NewClusterRole(
+					mclClusterRoleName(clusterName),
+					[]string{}, map[string]string{clusterv1.ClusterNameLabelKey: ""}, false)
+				mclClusterRoleBinding := testinghelpers.NewClusterRoleBinding(
+					mclClusterRoleBindingName(clusterName),
+					[]string{}, map[string]string{clusterv1.ClusterNameLabelKey: ""}, false)
+				objs = append(objs, mclClusterRole, mclClusterRoleBinding)
+			}
+			if c.namespace != nil {
+				registrationRoleBinding := testinghelpers.NewRoleBinding(c.namespace.Name,
+					registrationRoleBindingName(c.namespace.Name),
+					[]string{}, map[string]string{clusterv1.ClusterNameLabelKey: ""}, false)
+				objs = append(objs, registrationRoleBinding)
+			}
+
 			if c.workRoleBinding != nil {
 				objs = append(objs, c.workRoleBinding)
 			}
@@ -107,16 +159,20 @@
 				}
 			}
 
-			clusterClient := fakeclusterclient.NewSimpleClientset(c.cluster)
-			clusterInformerFactory := clusterinformers.NewSharedInformerFactory(clusterClient, time.Minute*10)
-			clusterStore := clusterInformerFactory.Cluster().V1().ManagedClusters().Informer().GetStore()
-			if err := clusterStore.Add(c.cluster); err != nil {
-				t.Fatal(err)
-			}
+			var clusterPatcher patcher.Patcher[*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus]
+			var clusterClient *fakeclusterclient.Clientset
+			if c.cluster != nil {
+				clusterClient = fakeclusterclient.NewSimpleClientset(c.cluster)
+				clusterInformerFactory := clusterinformers.NewSharedInformerFactory(clusterClient, time.Minute*10)
+				clusterStore := clusterInformerFactory.Cluster().V1().ManagedClusters().Informer().GetStore()
+				if err := clusterStore.Add(c.cluster); err != nil {
+					t.Fatal(err)
+				}
 
-			clusterPatcher := patcher.NewPatcher[
-				*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
-				clusterClient.ClusterV1().ManagedClusters())
+				clusterPatcher = patcher.NewPatcher[
+					*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
+					clusterClient.ClusterV1().ManagedClusters())
+			}
 
 			workClient := fakeworkclient.NewSimpleClientset()
 			workInformerFactory := workinformers.NewSharedInformerFactory(workClient, 5*time.Minute)
@@ -130,32 +186,40 @@
 			_ = newGCClusterRbacController(
 				kubeClient,
 				clusterPatcher,
-				clusterInformerFactory.Cluster().V1().ManagedClusters(),
 				kubeInformerFactory.Rbac().V1().ClusterRoles().Lister(),
-				kubeInformerFactory.Rbac().V1().ClusterRoleBindings().Lister(),
 				kubeInformerFactory.Rbac().V1().RoleBindings().Lister(),
 				workInformerFactory.Work().V1().ManifestWorks().Lister(),
 				register.NewNoopApprover(),
 				events.NewInMemoryRecorder(""),
-				true,
+				c.resourceCleanupFeatureGateEnable,
 			)
 
 			ctrl := &gcClusterRbacController{
 				kubeClient:        kubeClient,
-				clusterLister:     clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
 				clusterRoleLister: kubeInformerFactory.Rbac().V1().ClusterRoles().Lister(),
-				clusterRoleBingLister: kubeInformerFactory.Rbac().V1().ClusterRoleBindings().Lister(),
 				roleBindingLister:  kubeInformerFactory.Rbac().V1().RoleBindings().Lister(),
 				manifestWorkLister: workInformerFactory.Work().V1().ManifestWorks().Lister(),
 				clusterPatcher:     clusterPatcher,
 				approver:           register.NewNoopApprover(),
 				eventRecorder:      events.NewInMemoryRecorder(""),
-				resourceCleanupFeatureGateEnable: true,
+				resourceCleanupFeatureGateEnable: c.resourceCleanupFeatureGateEnable,
 			}
 
-			op, err := ctrl.reconcile(context.TODO(), c.cluster)
+			op, err := ctrl.reconcile(context.TODO(), c.cluster, c.namespace)
 			testingcommon.AssertError(t, err, "")
 			assert.Equal(t, op, c.expectedOp)
-			c.validateActions(t, kubeClient.Actions(), clusterClient.Actions())
+			if clusterClient != nil {
+				c.validateActions(t, kubeClient.Actions(), clusterClient.Actions())
+			} else {
+				c.validateActions(t, kubeClient.Actions(), nil)
+			}
 		})
 	}
 }
+
+func newNamespace(name string) *corev1.Namespace {
+	return &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+	}
+}
diff --git a/pkg/registration/hub/gc/gc_resources.go b/pkg/registration/hub/gc/gc_resources.go
index ddfc6dc8c..b43f69e66 100644
--- a/pkg/registration/hub/gc/gc_resources.go
+++ b/pkg/registration/hub/gc/gc_resources.go
@@ -6,6 +6,7 @@ import (
 	"strconv"
 
 	"github.com/openshift/library-go/pkg/operator/events"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -46,17 +47,18 @@
 	}
 }
 
-func (r *gcResourcesController) reconcile(ctx context.Context, cluster *clusterv1.ManagedCluster) (gcReconcileOp, error) {
+func (r *gcResourcesController) reconcile(ctx context.Context,
+	cluster *clusterv1.ManagedCluster, clusterNamespace *corev1.Namespace) (gcReconcileOp, error) {
 	var errs []error
-	if cluster.DeletionTimestamp.IsZero() {
+	if clusterNamespace == nil {
 		return gcReconcileContinue, nil
 	}
 
 	// delete the resources in order. to delete the next resource after all resource instances are deleted.
 	for _, resourceGVR := range r.resourceGVRList {
 		resourceList, err := r.metadataClient.Resource(resourceGVR).
-			Namespace(cluster.Name).List(ctx, metav1.ListOptions{})
+			Namespace(clusterNamespace.Name).List(ctx, metav1.ListOptions{})
 		if errors.IsNotFound(err) {
 			continue
 		}
@@ -68,21 +70,23 @@ func (r *gcResourcesController) reconcile(ctx context.Context, cluster *clusterv
 			continue
 		}
 
-		remainingCnt, finalizerPendingCnt := r.RemainingCnt(resourceList)
-		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
-			Type:   clusterv1.ManagedClusterConditionDeleting,
-			Status: metav1.ConditionFalse,
-			Reason: clusterv1.ConditionDeletingReasonResourceRemaining,
-			Message: fmt.Sprintf("The resource %v is remaning, the remaining count is %v, "+
-				"the finalizer pending count is %v", resourceGVR.Resource, remainingCnt, finalizerPendingCnt),
-		})
+		if cluster != nil {
+			remainingCnt, finalizerPendingCnt := r.RemainingCnt(resourceList)
+			meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+				Type:   clusterv1.ManagedClusterConditionDeleting,
+				Status: metav1.ConditionFalse,
+				Reason: clusterv1.ConditionDeletingReasonResourceRemaining,
+				Message: fmt.Sprintf("The resource %v is remaining, the remaining count is %v, "+
+					"the finalizer pending count is %v", resourceGVR.Resource, remainingCnt, finalizerPendingCnt),
+			})
+		}
 
 		// sort the resources by priority, and then find the lowest priority.
 		priorityResourceMap := mapPriorityResource(resourceList)
 		firstDeletePriority := getFirstDeletePriority(priorityResourceMap)
 		// delete the resource instances with the lowest priority in one reconciling.
 		for _, resourceName := range priorityResourceMap[firstDeletePriority] {
-			err = r.metadataClient.Resource(resourceGVR).Namespace(cluster.Name).
+			err = r.metadataClient.Resource(resourceGVR).Namespace(clusterNamespace.Name).
 				Delete(ctx, resourceName, metav1.DeleteOptions{})
 			if err != nil && !errors.IsNotFound(err) {
 				errs = append(errs, err)
@@ -95,12 +99,14 @@ func (r *gcResourcesController) reconcile(ctx context.Context, cluster *clusterv
 		return gcReconcileRequeue, nil
 	}
 
-	meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
-		Type:    clusterv1.ManagedClusterConditionDeleting,
-		Status:  metav1.ConditionTrue,
-		Reason:  clusterv1.ConditionDeletingReasonNoResource,
-		Message: "No cleaned resource in cluster ns.",
-	})
+	if cluster != nil {
+		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+			Type:    clusterv1.ManagedClusterConditionDeleting,
+			Status:  metav1.ConditionTrue,
+			Reason:  clusterv1.ConditionDeletingReasonNoResource,
+			Message: "No cleaned resource in cluster ns.",
+		})
+	}
 
 	return gcReconcileContinue, nil
 }
diff --git a/pkg/registration/hub/gc/gc_resources_test.go b/pkg/registration/hub/gc/gc_resources_test.go
index dcb4b87ff..85c50dfa3 100644
--- a/pkg/registration/hub/gc/gc_resources_test.go
+++ b/pkg/registration/hub/gc/gc_resources_test.go
@@ -6,6 +6,7 @@ import (
 	"github.com/openshift/library-go/pkg/operator/events"
 	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -24,13 +25,14 @@ func TestGCResourcesController(t *testing.T) {
 	cases := []struct {
 		name            string
 		cluster         *clusterv1.ManagedCluster
+		namespace       *corev1.Namespace
 		objs            []runtime.Object
 		expectedOp      gcReconcileOp
 		expectedErr     error
 		validateActions func(t *testing.T, kubeActions []clienttesting.Action)
 	}{
 		{
-			name:       "do nothing if the cluster is not deleting",
+			name:       "do nothing if no ns",
 			cluster:    testinghelpers.NewManagedCluster(),
 			expectedOp: gcReconcileContinue,
 			expectedErr: nil,
@@ -39,8 +41,9 @@
 			},
 		},
 		{
-			name:       "delete addon if the cluster is deleting",
+			name:       "delete addon",
 			cluster:    testinghelpers.NewDeletingManagedCluster(),
+			namespace:  newNamespace(testinghelpers.TestManagedClusterName),
 			objs:       []runtime.Object{newAddonMetadata(testinghelpers.TestManagedClusterName, "test", nil)},
 			expectedOp: gcReconcileRequeue,
 			expectedErr: nil,
@@ -49,8 +52,9 @@
 			},
 		},
 		{
-			name:       "delete work if the cluster is deleting",
+			name:       "delete work",
 			cluster:    testinghelpers.NewDeletingManagedCluster(),
+			namespace:  newNamespace(testinghelpers.TestManagedClusterName),
 			objs:       []runtime.Object{newWorkMetadata(testinghelpers.TestManagedClusterName, "test", nil)},
 			expectedOp: gcReconcileRequeue,
 			expectedErr: nil,
@@ -75,7 +79,7 @@
 				eventRecorder: events.NewInMemoryRecorder(""),
 			}
 
-			op, err := ctrl.reconcile(context.TODO(), c.cluster)
+			op, err := ctrl.reconcile(context.TODO(), c.cluster, c.namespace)
 			assert.Equal(t, c.expectedErr, err)
 			assert.Equal(t, c.expectedOp, op)
 			c.validateActions(t, metadataClient.Actions())
diff --git a/pkg/registration/hub/manager.go b/pkg/registration/hub/manager.go
index 299bde4d2..afaf9b111 100644
--- a/pkg/registration/hub/manager.go
+++ b/pkg/registration/hub/manager.go
@@ -272,9 +272,9 @@ func (m *HubManagerOptions) RunControllerManagerWithInformers(
 	gcController := gc.NewGCController(
 		kubeInformers.Rbac().V1().ClusterRoles().Lister(),
-		kubeInformers.Rbac().V1().ClusterRoleBindings().Lister(),
 		kubeInformers.Rbac().V1().RoleBindings().Lister(),
 		clusterInformers.Cluster().V1().ManagedClusters(),
+		kubeInformers.Core().V1().Namespaces().Lister(),
 		workInformers.Work().V1().ManifestWorks().Lister(),
 		clusterClient,
 		kubeClient,