diff --git a/pkg/registration/hub/gc/controller.go b/pkg/registration/hub/gc/controller.go
index 2f5b390a3..53c9bca9f 100644
--- a/pkg/registration/hub/gc/controller.go
+++ b/pkg/registration/hub/gc/controller.go
@@ -7,8 +7,7 @@ import (
 	"github.com/openshift/library-go/pkg/controller/factory"
 	"github.com/openshift/library-go/pkg/operator/events"
-	operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -47,7 +46,7 @@ var (
 // gcReconciler is an interface for reconcile cleanup logic after cluster is deleted.
 // clusterName is from the queueKey, cluster may be nil.
 type gcReconciler interface {
-	reconcile(ctx context.Context, cluster *clusterv1.ManagedCluster) (gcReconcileOp, error)
+	reconcile(ctx context.Context, cluster *clusterv1.ManagedCluster, clusterNamespace string) (gcReconcileOp, error)
 }

 type GCController struct {
@@ -59,7 +58,6 @@ type GCController struct {
 // NewGCController ensures the related resources are cleaned up after cluster is deleted
 func NewGCController(
 	clusterRoleLister rbacv1listers.ClusterRoleLister,
-	clusterRoleBindingLister rbacv1listers.ClusterRoleBindingLister,
 	roleBindingLister rbacv1listers.RoleBindingLister,
 	clusterInformer informerv1.ManagedClusterInformer,
 	manifestWorkLister worklister.ManifestWorkLister,
@@ -98,9 +96,8 @@ func NewGCController(
 	}

 	controller.gcReconcilers = append(controller.gcReconcilers,
-		newGCClusterRbacController(kubeClient, clusterPatcher, clusterInformer, clusterRoleLister,
-			clusterRoleBindingLister, roleBindingLister, manifestWorkLister, approver, eventRecorder,
-			resourceCleanupFeatureGateEnable))
+		newGCClusterRbacController(kubeClient, clusterPatcher, clusterRoleLister, roleBindingLister,
+			manifestWorkLister, approver, eventRecorder, resourceCleanupFeatureGateEnable))

 	return factory.New().
 		WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterInformer.Informer()).
@@ -109,26 +106,33 @@ func NewGCController(

 // gc controller is watching cluster and to do these jobs:
 // 1. add a cleanup finalizer to managedCluster if the cluster is not deleting.
-// 2. clean up all rbac and resources in the cluster ns after the cluster is deleted.
+// 2. clean up all rolebindings and resources in the cluster ns after the cluster is deleted.
 func (r *GCController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
 	clusterName := controllerContext.QueueKey()
 	if clusterName == "" || clusterName == factory.DefaultQueueKey {
 		return nil
 	}

-	originalCluster, err := r.clusterLister.Get(clusterName)
-	switch {
-	case errors.IsNotFound(err):
-		return nil
-	case err != nil:
+	// cluster could be nil, which means the cluster is gone but the gc has not finished yet.
+	cluster, err := r.clusterLister.Get(clusterName)
+	if err != nil && !apierrors.IsNotFound(err) {
 		return err
 	}
-	cluster := originalCluster.DeepCopy()
+	var copyCluster *clusterv1.ManagedCluster
+	if cluster != nil {
+		if cluster.DeletionTimestamp.IsZero() {
+			_, err = r.clusterPatcher.AddFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer)
+			return err
+		}
+
+		copyCluster = cluster.DeepCopy()
+	}
+
 	var errs []error
 	var requeue bool
 	for _, reconciler := range r.gcReconcilers {
-		op, err := reconciler.reconcile(ctx, cluster)
+		op, err := reconciler.reconcile(ctx, copyCluster, clusterName)
 		if err != nil {
 			errs = append(errs, err)
 		}
@@ -139,10 +143,19 @@ func (r *GCController) sync(ctx context.Context, controllerContext factory.SyncC
 			break
 		}
 	}
-	// update cluster condition firstly
+
+	if requeue {
+		controllerContext.Queue().AddAfter(clusterName, 5*time.Second)
+	}
+
+	if cluster == nil {
+		return utilerrors.NewAggregate(errs)
+	}
+
+	// update cluster condition
 	if len(errs) != 0 {
-		applyErrors := operatorhelpers.NewMultiLineAggregate(errs)
-		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+		applyErrors := utilerrors.NewAggregate(errs)
+		meta.SetStatusCondition(&copyCluster.Status.Conditions, metav1.Condition{
 			Type:   clusterv1.ManagedClusterConditionDeleting,
 			Status: metav1.ConditionFalse,
 			Reason: clusterv1.ConditionDeletingReasonResourceError,
@@ -150,12 +163,13 @@ func (r *GCController) sync(ctx context.Context, controllerContext factory.SyncC
 		})
 	}

-	if _, err = r.clusterPatcher.PatchStatus(ctx, cluster, cluster.Status, originalCluster.Status); err != nil {
+	if _, err = r.clusterPatcher.PatchStatus(ctx, cluster, copyCluster.Status, cluster.Status); err != nil {
 		errs = append(errs, err)
 	}

-	if requeue {
-		controllerContext.Queue().AddAfter(clusterName, 1*time.Second)
+	if len(errs) != 0 || requeue {
+		return utilerrors.NewAggregate(errs)
 	}
-	return utilerrors.NewAggregate(errs)
+
+	return r.clusterPatcher.RemoveFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer)
 }
diff --git a/pkg/registration/hub/gc/controller_test.go b/pkg/registration/hub/gc/controller_test.go
index f874fdeac..75276f74f 100644
--- a/pkg/registration/hub/gc/controller_test.go
+++ b/pkg/registration/hub/gc/controller_test.go
@@ -7,11 +7,13 @@ import (
 	"github.com/openshift/library-go/pkg/controller/factory"
 	"github.com/openshift/library-go/pkg/operator/events"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	kubeinformers "k8s.io/client-go/informers"
 	fakeclient "k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/kubernetes/scheme"
 	fakemetadataclient "k8s.io/client-go/metadata/fake"
+	clienttesting "k8s.io/client-go/testing"

 	fakeclusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
 	clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
@@ -27,29 +29,45 @@ import (

 func TestGController(t *testing.T) {
 	cases := []struct {
-		name        string
-		key         string
-		cluster     *clusterv1.ManagedCluster
-		expectedErr string
+		name            string
+		key             string
+		cluster         *clusterv1.ManagedCluster
+		namespace       *corev1.Namespace
+		expectedErr     string
+		validateActions func(t *testing.T, clusterActions []clienttesting.Action)
 	}{
 		{
 			name:        "invalid key",
 			key:         factory.DefaultQueueKey,
 			cluster:     testinghelpers.NewDeletingManagedCluster(),
 			expectedErr: "",
+			validateActions: func(t *testing.T, clusterActions []clienttesting.Action) {
+				testingcommon.AssertNoActions(t, clusterActions)
+			},
 		},
 		{
-			name:        "valid key",
+			name:        "valid key with cluster",
 			key:         testinghelpers.TestManagedClusterName,
 			cluster:     testinghelpers.NewDeletingManagedCluster(),
 			expectedErr: "",
+			validateActions: func(t *testing.T, clusterActions []clienttesting.Action) {
+				testingcommon.AssertActions(t, clusterActions, "patch", "patch")
+			},
+		},
+		{
+			name:        "valid key with no cluster",
+			key:         "cluster1",
+			cluster:     testinghelpers.NewDeletingManagedCluster(),
+			expectedErr: "",
+			validateActions: func(t *testing.T, clusterActions []clienttesting.Action) {
+				testingcommon.AssertNoActions(t, clusterActions)
+			},
 		},
 	}
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
 			kubeClient := fakeclient.NewSimpleClientset()
 			kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Minute*10)
-
 			metadataClient := fakemetadataclient.NewSimpleMetadataClient(scheme.Scheme)

 			clusterClient := fakeclusterclient.NewSimpleClientset(c.cluster)
@@ -66,7 +84,6 @@ func TestGController(t *testing.T) {

 			_ = NewGCController(
 				kubeInformerFactory.Rbac().V1().ClusterRoles().Lister(),
-				kubeInformerFactory.Rbac().V1().ClusterRoleBindings().Lister(),
 				kubeInformerFactory.Rbac().V1().RoleBindings().Lister(),
 				clusterInformerFactory.Cluster().V1().ManagedClusters(),
 				workInformerFactory.Work().V1().ManifestWorks().Lister(),
@@ -79,7 +96,12 @@ func TestGController(t *testing.T) {
 					"work.open-cluster-management.io/v1/manifestworks"},
 				true,
 			)
-
+			namespaceStore := kubeInformerFactory.Core().V1().Namespaces().Informer().GetStore()
+			if c.namespace != nil {
+				if err := namespaceStore.Add(c.namespace); err != nil {
+					t.Fatal(err)
+				}
+			}
 			clusterPatcher := patcher.NewPatcher[
 				*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
 				clusterClient.ClusterV1().ManagedClusters())
@@ -91,9 +113,7 @@ func TestGController(t *testing.T) {
 				newGCResourcesController(metadataClient, []schema.GroupVersionResource{addonGvr, workGvr},
 					events.NewInMemoryRecorder("")),
 				newGCClusterRbacController(kubeClient, clusterPatcher,
-					clusterInformerFactory.Cluster().V1().ManagedClusters(),
 					kubeInformerFactory.Rbac().V1().ClusterRoles().Lister(),
-					kubeInformerFactory.Rbac().V1().ClusterRoleBindings().Lister(),
 					kubeInformerFactory.Rbac().V1().RoleBindings().Lister(),
 					workInformerFactory.Work().V1().ManifestWorks().Lister(),
 					register.NewNoopApprover(),
@@ -105,6 +125,7 @@ func TestGController(t *testing.T) {
 			controllerContext := testingcommon.NewFakeSyncContext(t, c.key)
 			err := ctrl.sync(context.TODO(), controllerContext)
 			testingcommon.AssertError(t, err, c.expectedErr)
+			c.validateActions(t, clusterClient.Actions())
 		})
 	}
 }
diff --git a/pkg/registration/hub/gc/gc_cluster_rbac.go b/pkg/registration/hub/gc/gc_cluster_rbac.go
index b8d39f4e4..c065faed0 100644
--- a/pkg/registration/hub/gc/gc_cluster_rbac.go
+++ b/pkg/registration/hub/gc/gc_cluster_rbac.go
@@ -12,10 +12,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/kubernetes"
 	rbacv1listers "k8s.io/client-go/listers/rbac/v1"
-	"k8s.io/klog/v2"

-	informerv1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1"
-	clusterv1listers "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
 	worklister "open-cluster-management.io/api/client/work/listers/work/v1"
 	clusterv1 "open-cluster-management.io/api/cluster/v1"
 	"open-cluster-management.io/sdk-go/pkg/patcher"
@@ -46,9 +43,7 @@ var (

 type gcClusterRbacController struct {
 	kubeClient            kubernetes.Interface
-	clusterLister         clusterv1listers.ManagedClusterLister
 	clusterRoleLister     rbacv1listers.ClusterRoleLister
-	clusterRoleBingLister rbacv1listers.ClusterRoleBindingLister
 	roleBindingLister     rbacv1listers.RoleBindingLister
 	manifestWorkLister    worklister.ManifestWorkLister
 	clusterPatcher        patcher.Patcher[*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus]
@@ -61,9 +56,7 @@ type gcClusterRbacController struct {
 func newGCClusterRbacController(
 	kubeClient kubernetes.Interface,
 	clusterPatcher patcher.Patcher[*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus],
-	clusterInformer informerv1.ManagedClusterInformer,
 	clusterRoleLister rbacv1listers.ClusterRoleLister,
-	clusterRoleBindingLister rbacv1listers.ClusterRoleBindingLister,
 	roleBindingLister rbacv1listers.RoleBindingLister,
 	manifestWorkLister worklister.ManifestWorkLister,
 	approver register.Approver,
@@ -73,9 +66,7 @@ func newGCClusterRbacController(

 	return &gcClusterRbacController{
 		kubeClient:            kubeClient,
-		clusterLister:         clusterInformer.Lister(),
 		clusterRoleLister:     clusterRoleLister,
-		clusterRoleBingLister: clusterRoleBindingLister,
 		roleBindingLister:     roleBindingLister,
 		manifestWorkLister:    manifestWorkLister,
 		clusterPatcher:        clusterPatcher,
@@ -85,43 +76,35 @@ func newGCClusterRbacController(
 	}
 }

-func (r *gcClusterRbacController) reconcile(ctx context.Context, cluster *clusterv1.ManagedCluster) (gcReconcileOp, error) {
-	if cluster.DeletionTimestamp.IsZero() {
-		_, err := r.clusterPatcher.AddFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer)
-		return gcReconcileStop, err
-	}
+func (r *gcClusterRbacController) reconcile(ctx context.Context,
+	cluster *clusterv1.ManagedCluster, clusterNamespace string) (gcReconcileOp, error) {
+	if cluster != nil {
+		if err := r.removeClusterRbac(ctx, cluster.Name, cluster.Spec.HubAcceptsClient); err != nil {
+			return gcReconcileContinue, err
+		}

-	if err := r.removeClusterRbac(ctx, cluster.Name, cluster.Spec.HubAcceptsClient); err != nil {
-		return gcReconcileContinue, err
-	}
+		if err := r.approver.Cleanup(ctx, cluster); err != nil {
+			return gcReconcileContinue, err
+		}

-	if err := r.approver.Cleanup(ctx, cluster); err != nil {
-		return gcReconcileContinue, err
+		// if the GC feature gate is disabled, the finalizer is removed from the cluster after the related rbac is deleted.
+		// there is no need to wait for other resources to be cleaned up before removing the finalizer.
+		if !r.resourceCleanupFeatureGateEnable {
+			if err := r.clusterPatcher.RemoveFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer); err != nil {
+				return gcReconcileStop, err
+			}
+		}
 	}

-	works, err := r.manifestWorkLister.ManifestWorks(cluster.Name).List(labels.Everything())
+	works, err := r.manifestWorkLister.ManifestWorks(clusterNamespace).List(labels.Everything())
 	if err != nil && !errors.IsNotFound(err) {
 		return gcReconcileStop, err
 	}
 	if len(works) != 0 {
-		klog.V(2).Infof("cluster %s is deleting, waiting %d works in the cluster namespace to be deleted.",
-			cluster.Name, len(works))
-
-		// remove finalizer to delete the cluster for backwards compatible.
-		if !r.resourceCleanupFeatureGateEnable {
-			return gcReconcileStop, r.clusterPatcher.RemoveFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer)
-		}
 		return gcReconcileRequeue, nil
 	}

-	if err = r.removeFinalizerFromWorkRoleBinding(ctx, cluster.Name, manifestWorkFinalizer); err != nil {
-		return gcReconcileStop, err
-	}
-
-	r.eventRecorder.Eventf("ManagedClusterGC",
-		"managed cluster %s is deleting and the cluster rbac are deleted", cluster.Name)
-
-	return gcReconcileContinue, r.clusterPatcher.RemoveFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer)
+	return gcReconcileStop, r.removeFinalizerFromWorkRoleBinding(ctx, clusterNamespace, manifestWorkFinalizer)
 }

 func (r *gcClusterRbacController) removeClusterRbac(ctx context.Context, clusterName string, accepted bool) error {
diff --git a/pkg/registration/hub/gc/gc_cluster_rbac_test.go b/pkg/registration/hub/gc/gc_cluster_rbac_test.go
index 32da6727e..eba0d8fdc 100644
--- a/pkg/registration/hub/gc/gc_cluster_rbac_test.go
+++ b/pkg/registration/hub/gc/gc_cluster_rbac_test.go
@@ -27,37 +27,70 @@ import (

 func TestGCClusterRbacController(t *testing.T) {
 	cases := []struct {
-		name            string
-		cluster         *clusterv1.ManagedCluster
-		works           []*workv1.ManifestWork
-		workRoleBinding runtime.Object
-		expectedOp      gcReconcileOp
-		validateActions func(t *testing.T, kubeActions, clusterActions []clienttesting.Action)
+		name                             string
+		clusterName                      string
+		cluster                          *clusterv1.ManagedCluster
+		works                            []*workv1.ManifestWork
+		workRoleBinding                  runtime.Object
+		resourceCleanupFeatureGateEnable bool
+		expectedOp                       gcReconcileOp
+		validateActions                  func(t *testing.T, kubeActions, clusterActions []clienttesting.Action)
 	}{
 		{
-			name:       "add finalizer to the mcl",
-			cluster:    testinghelpers.NewManagedCluster(),
+			name:       "no cluster and no works, do nothing",
 			expectedOp: gcReconcileStop,
 			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
-				testingcommon.AssertActions(t, clusterActions, "patch")
 				testingcommon.AssertNoActions(t, kubeActions)
 			},
 		},
 		{
-			name:    "delete rbac with no works",
-			cluster: testinghelpers.NewDeletingManagedCluster(),
-			workRoleBinding: testinghelpers.NewRoleBinding(testinghelpers.TestManagedClusterName,
-				workRoleBindingName(testinghelpers.TestManagedClusterName), []string{manifestWorkFinalizer},
-				map[string]string{clusterv1.ClusterNameLabelKey: testinghelpers.TestManagedClusterName}, true),
-			expectedOp: gcReconcileContinue,
+			name:        "cluster is nil and has works, requeue",
+			clusterName: "cluster1",
+			works: []*workv1.ManifestWork{
+				testinghelpers.NewManifestWork("cluster1", "test", nil, nil, nil, nil),
+			},
+			expectedOp: gcReconcileRequeue,
 			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
-				testingcommon.AssertActions(t, kubeActions, "delete", "delete", "delete", "delete", "patch")
+				testingcommon.AssertNoActions(t, kubeActions)
+			},
+		},
+		{
+			name:        "cluster is nil and no works, remove finalizer from work rolebinding",
+			clusterName: "cluster1",
+			expectedOp:  gcReconcileStop,
+			workRoleBinding: testinghelpers.NewRoleBinding("cluster1",
+				workRoleBindingName("cluster1"), []string{manifestWorkFinalizer},
+				map[string]string{clusterv1.ClusterNameLabelKey: "cluster1"}, true),
+			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
+				testingcommon.AssertActions(t, kubeActions, "patch")
+			},
+		},
+		{
+			name:                             "gc is disabled, delete rbac and remove finalizer from cluster",
+			clusterName:                      testinghelpers.TestManagedClusterName,
+			cluster:                          testinghelpers.NewDeletingManagedCluster(),
+			resourceCleanupFeatureGateEnable: false,
+			expectedOp:                       gcReconcileStop,
+			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
+				testingcommon.AssertActions(t, kubeActions, "delete", "delete", "delete", "delete")
+				testingcommon.AssertActions(t, clusterActions, "patch")
 			},
 		},
 		{
-			name:    "delete rbac with works",
-			cluster: testinghelpers.NewDeletingManagedCluster(),
+			name:                             "gc is enabled with no works, delete rbac",
+			clusterName:                      testinghelpers.TestManagedClusterName,
+			cluster:                          testinghelpers.NewDeletingManagedCluster(),
+			resourceCleanupFeatureGateEnable: true,
+			expectedOp:                       gcReconcileStop,
+			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
+				testingcommon.AssertActions(t, kubeActions, "delete", "delete", "delete", "delete")
+			},
+		},
+		{
+			name:                             "gc is disabled with works",
+			clusterName:                      testinghelpers.TestManagedClusterName,
+			cluster:                          testinghelpers.NewDeletingManagedCluster(),
+			resourceCleanupFeatureGateEnable: false,
 			works: []*workv1.ManifestWork{
 				testinghelpers.NewManifestWork(testinghelpers.TestManagedClusterName, "test", nil, nil, nil, nil),
 			},
@@ -68,32 +101,41 @@ func TestGCClusterRbacController(t *testing.T) {
 			expectedOp: gcReconcileRequeue,
 			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
 				testingcommon.AssertActions(t, kubeActions, "delete", "delete", "delete", "delete")
-				testingcommon.AssertNoActions(t, clusterActions)
+				testingcommon.AssertActions(t, clusterActions, "patch")
 			},
 		},
 		{
-			name:       "remove finalizer of mcl",
-			cluster:    testinghelpers.NewDeletingManagedCluster(),
-			expectedOp: gcReconcileContinue,
+			name:                             "gc is disabled with no works",
+			clusterName:                      testinghelpers.TestManagedClusterName,
+			cluster:                          testinghelpers.NewDeletingManagedCluster(),
+			resourceCleanupFeatureGateEnable: false,
+			workRoleBinding: testinghelpers.NewRoleBinding(testinghelpers.TestManagedClusterName,
+				workRoleBindingName(testinghelpers.TestManagedClusterName), []string{manifestWorkFinalizer},
+				map[string]string{clusterv1.ClusterNameLabelKey: testinghelpers.TestManagedClusterName}, true),
+
+			expectedOp: gcReconcileStop,
 			validateActions: func(t *testing.T, kubeActions, clusterActions []clienttesting.Action) {
-				testingcommon.AssertActions(t, kubeActions, "delete", "delete", "delete", "delete")
+				testingcommon.AssertActions(t, kubeActions, "delete", "delete", "delete", "delete", "patch")
 				testingcommon.AssertActions(t, clusterActions, "patch")
 			},
 		},
 	}
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
+			objs := []runtime.Object{}
+
 			mclClusterRole := testinghelpers.NewClusterRole(
-				mclClusterRoleName(c.cluster.Name),
+				mclClusterRoleName(c.clusterName),
 				[]string{}, map[string]string{clusterv1.ClusterNameLabelKey: ""}, false)
 			mclClusterRoleBinding := testinghelpers.NewClusterRoleBinding(
-				mclClusterRoleBindingName(c.cluster.Name),
+				mclClusterRoleBindingName(c.clusterName),
 				[]string{}, map[string]string{clusterv1.ClusterNameLabelKey: ""}, false)
-			registrationRoleBinding := testinghelpers.NewRoleBinding(c.cluster.Name,
-				registrationRoleBindingName(c.cluster.Name),
+
+			registrationRoleBinding := testinghelpers.NewRoleBinding(c.clusterName,
+				registrationRoleBindingName(c.clusterName),
 				[]string{}, map[string]string{clusterv1.ClusterNameLabelKey: ""}, false)
+			objs = append(objs, mclClusterRole, mclClusterRoleBinding, registrationRoleBinding)

-			objs := []runtime.Object{mclClusterRole, mclClusterRoleBinding, registrationRoleBinding}
 			if c.workRoleBinding != nil {
 				objs = append(objs, c.workRoleBinding)
 			}
@@ -107,16 +149,20 @@ func TestGCClusterRbacController(t *testing.T) {
 				}
 			}

-			clusterClient := fakeclusterclient.NewSimpleClientset(c.cluster)
-			clusterInformerFactory := clusterinformers.NewSharedInformerFactory(clusterClient, time.Minute*10)
-			clusterStore := clusterInformerFactory.Cluster().V1().ManagedClusters().Informer().GetStore()
-			if err := clusterStore.Add(c.cluster); err != nil {
-				t.Fatal(err)
-			}
+			var clusterPatcher patcher.Patcher[*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus]
+			var clusterClient *fakeclusterclient.Clientset
+			if c.cluster != nil {
+				clusterClient = fakeclusterclient.NewSimpleClientset(c.cluster)
+				clusterInformerFactory := clusterinformers.NewSharedInformerFactory(clusterClient, time.Minute*10)
+				clusterStore := clusterInformerFactory.Cluster().V1().ManagedClusters().Informer().GetStore()
+				if err := clusterStore.Add(c.cluster); err != nil {
+					t.Fatal(err)
+				}

-			clusterPatcher := patcher.NewPatcher[
-				*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
-				clusterClient.ClusterV1().ManagedClusters())
+				clusterPatcher = patcher.NewPatcher[
+					*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
+					clusterClient.ClusterV1().ManagedClusters())
+			}

 			workClient := fakeworkclient.NewSimpleClientset()
 			workInformerFactory := workinformers.NewSharedInformerFactory(workClient, 5*time.Minute)
@@ -130,32 +176,32 @@ func TestGCClusterRbacController(t *testing.T) {

 			_ = newGCClusterRbacController(
 				kubeClient,
 				clusterPatcher,
-				clusterInformerFactory.Cluster().V1().ManagedClusters(),
 				kubeInformerFactory.Rbac().V1().ClusterRoles().Lister(),
-				kubeInformerFactory.Rbac().V1().ClusterRoleBindings().Lister(),
 				kubeInformerFactory.Rbac().V1().RoleBindings().Lister(),
 				workInformerFactory.Work().V1().ManifestWorks().Lister(),
 				register.NewNoopApprover(),
 				events.NewInMemoryRecorder(""),
-				true,
+				c.resourceCleanupFeatureGateEnable,
 			)

 			ctrl := &gcClusterRbacController{
 				kubeClient:                       kubeClient,
-				clusterLister:                    clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
 				clusterRoleLister:                kubeInformerFactory.Rbac().V1().ClusterRoles().Lister(),
-				clusterRoleBingLister:            kubeInformerFactory.Rbac().V1().ClusterRoleBindings().Lister(),
 				roleBindingLister:                kubeInformerFactory.Rbac().V1().RoleBindings().Lister(),
 				manifestWorkLister:               workInformerFactory.Work().V1().ManifestWorks().Lister(),
 				clusterPatcher:                   clusterPatcher,
 				approver:                         register.NewNoopApprover(),
 				eventRecorder:                    events.NewInMemoryRecorder(""),
-				resourceCleanupFeatureGateEnable: true,
+				resourceCleanupFeatureGateEnable: c.resourceCleanupFeatureGateEnable,
 			}

-			op, err := ctrl.reconcile(context.TODO(), c.cluster)
+			op, err := ctrl.reconcile(context.TODO(), c.cluster, c.clusterName)
 			testingcommon.AssertError(t, err, "")
 			assert.Equal(t, op, c.expectedOp)
-			c.validateActions(t, kubeClient.Actions(), clusterClient.Actions())
+			if clusterClient != nil {
+				c.validateActions(t, kubeClient.Actions(), clusterClient.Actions())
+			} else {
+				c.validateActions(t, kubeClient.Actions(), nil)
+			}
 		})
 	}
 }
diff --git a/pkg/registration/hub/gc/gc_resources.go b/pkg/registration/hub/gc/gc_resources.go
index ddfc6dc8c..c89d99299 100644
--- a/pkg/registration/hub/gc/gc_resources.go
+++ b/pkg/registration/hub/gc/gc_resources.go
@@ -46,17 +46,13 @@ func newGCResourcesController(
 	}
 }

-func (r *gcResourcesController) reconcile(ctx context.Context, cluster *clusterv1.ManagedCluster) (gcReconcileOp, error) {
+func (r *gcResourcesController) reconcile(ctx context.Context,
+	cluster *clusterv1.ManagedCluster, clusterNamespace string) (gcReconcileOp, error) {
 	var errs []error
-
-	if cluster.DeletionTimestamp.IsZero() {
-		return gcReconcileContinue, nil
-	}
-
 	// delete the resources in order. to delete the next resource after all resource instances are deleted.
 	for _, resourceGVR := range r.resourceGVRList {
 		resourceList, err := r.metadataClient.Resource(resourceGVR).
-			Namespace(cluster.Name).List(ctx, metav1.ListOptions{})
+			Namespace(clusterNamespace).List(ctx, metav1.ListOptions{})
 		if errors.IsNotFound(err) {
 			continue
 		}
@@ -68,21 +64,23 @@ func (r *gcResourcesController) reconcile(ctx context.Context, cluster *clusterv
 			continue
 		}

-		remainingCnt, finalizerPendingCnt := r.RemainingCnt(resourceList)
-		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
-			Type:   clusterv1.ManagedClusterConditionDeleting,
-			Status: metav1.ConditionFalse,
-			Reason: clusterv1.ConditionDeletingReasonResourceRemaining,
-			Message: fmt.Sprintf("The resource %v is remaning, the remaining count is %v, "+
-				"the finalizer pending count is %v", resourceGVR.Resource, remainingCnt, finalizerPendingCnt),
-		})
+		if cluster != nil {
+			remainingCnt, finalizerPendingCnt := r.RemainingCnt(resourceList)
+			meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+				Type:   clusterv1.ManagedClusterConditionDeleting,
+				Status: metav1.ConditionFalse,
+				Reason: clusterv1.ConditionDeletingReasonResourceRemaining,
+				Message: fmt.Sprintf("The resource %v is remaining, the remaining count is %v, "+
+					"the finalizer pending count is %v", resourceGVR.Resource, remainingCnt, finalizerPendingCnt),
+			})
+		}

 		// sort the resources by priority, and then find the lowest priority.
 		priorityResourceMap := mapPriorityResource(resourceList)
 		firstDeletePriority := getFirstDeletePriority(priorityResourceMap)
 		// delete the resource instances with the lowest priority in one reconciling.
 		for _, resourceName := range priorityResourceMap[firstDeletePriority] {
-			err = r.metadataClient.Resource(resourceGVR).Namespace(cluster.Name).
+			err = r.metadataClient.Resource(resourceGVR).Namespace(clusterNamespace).
 				Delete(ctx, resourceName, metav1.DeleteOptions{})
 			if err != nil && !errors.IsNotFound(err) {
 				errs = append(errs, err)
@@ -95,12 +93,14 @@
 		return gcReconcileRequeue, nil
 	}

-	meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
-		Type:    clusterv1.ManagedClusterConditionDeleting,
-		Status:  metav1.ConditionTrue,
-		Reason:  clusterv1.ConditionDeletingReasonNoResource,
-		Message: "No cleaned resource in cluster ns.",
-	})
+	if cluster != nil {
+		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+			Type:    clusterv1.ManagedClusterConditionDeleting,
+			Status:  metav1.ConditionTrue,
+			Reason:  clusterv1.ConditionDeletingReasonNoResource,
+			Message: "No resource left to clean up in the cluster ns.",
+		})
+	}

 	return gcReconcileContinue, nil
 }
diff --git a/pkg/registration/hub/gc/gc_resources_test.go b/pkg/registration/hub/gc/gc_resources_test.go
index dcb4b87ff..5c1bccffa 100644
--- a/pkg/registration/hub/gc/gc_resources_test.go
+++ b/pkg/registration/hub/gc/gc_resources_test.go
@@ -22,38 +22,32 @@ import (

 func TestGCResourcesController(t *testing.T) {
 	cases := []struct {
-		name            string
-		cluster         *clusterv1.ManagedCluster
-		objs            []runtime.Object
-		expectedOp      gcReconcileOp
-		expectedErr     error
-		validateActions func(t *testing.T, kubeActions []clienttesting.Action)
+		name             string
+		clusterNamespace string
+		cluster          *clusterv1.ManagedCluster
+		objs             []runtime.Object
+		expectedOp       gcReconcileOp
+		expectedErr      error
+		validateActions  func(t *testing.T, kubeActions []clienttesting.Action)
 	}{
 		{
-			name:        "do nothing if the cluster is not deleting",
-			cluster:     testinghelpers.NewManagedCluster(),
-			expectedOp:  gcReconcileContinue,
-			expectedErr: nil,
-			validateActions: func(t *testing.T, kubeActions []clienttesting.Action) {
-				testingcommon.AssertNoActions(t, kubeActions)
-			},
-		},
-		{
-			name:        "delete addon if the cluster is deleting",
-			cluster:     testinghelpers.NewDeletingManagedCluster(),
-			objs:        []runtime.Object{newAddonMetadata(testinghelpers.TestManagedClusterName, "test", nil)},
-			expectedOp:  gcReconcileRequeue,
-			expectedErr: nil,
+			name:             "delete addon",
+			cluster:          testinghelpers.NewDeletingManagedCluster(),
+			clusterNamespace: testinghelpers.TestManagedClusterName,
+			objs:             []runtime.Object{newAddonMetadata(testinghelpers.TestManagedClusterName, "test", nil)},
+			expectedOp:       gcReconcileRequeue,
+			expectedErr:      nil,
 			validateActions: func(t *testing.T, kubeActions []clienttesting.Action) {
 				testingcommon.AssertActions(t, kubeActions, "list", "delete")
 			},
 		},
 		{
-			name:        "delete work if the cluster is deleting",
-			cluster:     testinghelpers.NewDeletingManagedCluster(),
-			objs:        []runtime.Object{newWorkMetadata(testinghelpers.TestManagedClusterName, "test", nil)},
-			expectedOp:  gcReconcileRequeue,
-			expectedErr: nil,
+			name:             "delete work",
+			clusterNamespace: testinghelpers.TestManagedClusterName,
+			cluster:          testinghelpers.NewDeletingManagedCluster(),
+			objs:             []runtime.Object{newWorkMetadata(testinghelpers.TestManagedClusterName, "test", nil)},
+			expectedOp:       gcReconcileRequeue,
+			expectedErr:      nil,
 			validateActions: func(t *testing.T, kubeActions []clienttesting.Action) {
 				testingcommon.AssertActions(t, kubeActions, "list", "list", "delete")
 			},
@@ -75,7 +69,7 @@ func TestGCResourcesController(t *testing.T) {
 				eventRecorder: events.NewInMemoryRecorder(""),
 			}

-			op, err := ctrl.reconcile(context.TODO(), c.cluster)
+			op, err := ctrl.reconcile(context.TODO(), c.cluster, c.clusterNamespace)
 			assert.Equal(t, c.expectedErr, err)
assert.Equal(t, c.expectedOp, op) c.validateActions(t, metadataClient.Actions()) diff --git a/pkg/registration/hub/manager.go b/pkg/registration/hub/manager.go index 299bde4d2..398131651 100644 --- a/pkg/registration/hub/manager.go +++ b/pkg/registration/hub/manager.go @@ -272,7 +272,6 @@ func (m *HubManagerOptions) RunControllerManagerWithInformers( gcController := gc.NewGCController( kubeInformers.Rbac().V1().ClusterRoles().Lister(), - kubeInformers.Rbac().V1().ClusterRoleBindings().Lister(), kubeInformers.Rbac().V1().RoleBindings().Lister(), clusterInformers.Cluster().V1().ManagedClusters(), workInformers.Work().V1().ManifestWorks().Lister(),
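
Note on the new reconciler contract: the core change in this patch is that reconcile now receives the cluster namespace as a plain string and must tolerate a nil *ManagedCluster, so namespace cleanup can keep running after the ManagedCluster object itself is gone. The following minimal, self-contained Go sketch illustrates the shape of that contract; the types here are simplified stand-ins, not code from this repository.

// Sketch of the reconcile contract after this patch (simplified stand-in
// types; the real code uses clusterv1.ManagedCluster and the gc package types).
package main

import (
	"context"
	"fmt"
)

type gcReconcileOp int

const (
	gcReconcileContinue gcReconcileOp = iota
	gcReconcileRequeue
	gcReconcileStop
)

// managedCluster stands in for *clusterv1.ManagedCluster.
type managedCluster struct{ Name string }

// gcReconciler mirrors the new interface: clusterNamespace comes from the
// queue key, and cluster may be nil when the object is already deleted.
type gcReconciler interface {
	reconcile(ctx context.Context, cluster *managedCluster, clusterNamespace string) (gcReconcileOp, error)
}

type exampleReconciler struct{}

func (exampleReconciler) reconcile(_ context.Context, cluster *managedCluster, ns string) (gcReconcileOp, error) {
	if cluster == nil {
		// The ManagedCluster is gone; cleanup proceeds using only the
		// namespace name instead of returning early on NotFound.
		fmt.Printf("cleaning up namespace %s for an already-deleted cluster\n", ns)
		return gcReconcileContinue, nil
	}
	fmt.Printf("cleaning up namespace %s for cluster %s\n", ns, cluster.Name)
	return gcReconcileContinue, nil
}

func main() {
	var r gcReconciler = exampleReconciler{}
	_, _ = r.reconcile(context.Background(), nil, "cluster1")
}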