diff --git a/Gopkg.lock b/Gopkg.lock index 86392086..b6b74de3 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -334,12 +334,12 @@ version = "v0.3.7" [[projects]] - branch = "client-go" - digest = "1:928c48472d9006c89f70c696ea1f7cb829b38e410e0641ec3f434080c2a90896" + branch = "immutable-transform" + digest = "1:5a6609de998eec16a99dd85dff44eda0b432a1152488450451098059fdfdb8e6" name = "github.com/jcrossley3/manifestival" packages = ["."] pruneopts = "NUT" - revision = "97e164e00f2355c8d3816d9006802224ba613a60" + revision = "e5b28800a2bbc4342940c8717a770030905f5519" [[projects]] digest = "1:1f2aebae7e7c856562355ec0198d8ca2fa222fb05e5b1b66632a1fce39631885" diff --git a/Gopkg.toml b/Gopkg.toml index c657b29a..93e9337d 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -56,7 +56,7 @@ required = [ [[constraint]] name = "github.com/jcrossley3/manifestival" - branch = "client-go" + branch = "immutable-transform" [[constraint]] name = "knative.dev/pkg" diff --git a/pkg/reconciler/knativeserving/knativeserving_controller.go b/pkg/reconciler/knativeserving/knativeserving_controller.go index a51a68c8..0ab81631 100644 --- a/pkg/reconciler/knativeserving/knativeserving_controller.go +++ b/pkg/reconciler/knativeserving/knativeserving_controller.go @@ -109,19 +109,24 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error { func (r *Reconciler) reconcile(ctx context.Context, ks *servingv1alpha1.KnativeServing) error { reqLogger := r.Logger.With(zap.String("Request.Namespace", ks.Namespace)).With("Request.Name", ks.Name) + reqLogger.Infow("Reconciling KnativeServing", "status", ks.Status) // TODO: We need to find a better way to make sure the instance has the updated info. 
ks.SetGroupVersionKind(servingv1alpha1.SchemeGroupVersion.WithKind("KnativeServing")) - stages := []func(*servingv1alpha1.KnativeServing) error{ + stages := []func(*mf.Manifest, *servingv1alpha1.KnativeServing) error{ r.initStatus, r.install, r.checkDeployments, r.deleteObsoleteResources, } - reqLogger.Infow("Reconciling KnativeServing", "status", ks.Status) + manifest, err := r.transform(ks) + if err != nil { + return err + } + for _, stage := range stages { - if err := stage(ks); err != nil { + if err := stage(manifest, ks); err != nil { return err } } @@ -129,16 +134,14 @@ func (r *Reconciler) reconcile(ctx context.Context, ks *servingv1alpha1.KnativeS return nil } -// Initialize status conditions -func (r *Reconciler) initStatus(instance *servingv1alpha1.KnativeServing) error { - r.Logger.Debug("Initializing status") - if len(instance.Status.Conditions) == 0 { - instance.Status.InitializeConditions() - if err := r.updateStatus(instance); err != nil { - return err - } +// Transform the resources +func (r *Reconciler) transform(instance *servingv1alpha1.KnativeServing) (*mf.Manifest, error) { + r.Logger.Debug("Transforming manifest") + transforms, err := platform.Transformers(r.KubeClientSet, instance, r.Logger) + if err != nil { + return nil, err } - return nil + return r.config.Transform(transforms...) 
} // Update the status subresource @@ -153,37 +156,22 @@ func (r *Reconciler) updateStatus(instance *servingv1alpha1.KnativeServing) erro return nil } -// Install the resources from the Manifest -func (r *Reconciler) install(instance *servingv1alpha1.KnativeServing) error { - r.Logger.Debug("Installing manifest") - defer r.updateStatus(instance) - - if err := r.transform(instance); err != nil { - return err - } - if err := r.apply(instance); err != nil { - return err - } - return nil -} - -// Transform the resources -func (r *Reconciler) transform(instance *servingv1alpha1.KnativeServing) error { - r.Logger.Debug("Transforming manifest") - transforms, err := platform.Transformers(r.KubeClientSet, instance, r.Logger) - if err != nil { - return err - } - if err := r.config.Transform(transforms...); err != nil { - return err +// Initialize status conditions +func (r *Reconciler) initStatus(_ *mf.Manifest, instance *servingv1alpha1.KnativeServing) error { + r.Logger.Debug("Initializing status") + if len(instance.Status.Conditions) == 0 { + instance.Status.InitializeConditions() + if err := r.updateStatus(instance); err != nil { + return err + } } return nil } -// Apply the embedded resources -func (r *Reconciler) apply(instance *servingv1alpha1.KnativeServing) error { - r.Logger.Debug("Applying manifest") - if err := r.config.ApplyAll(); err != nil { +// Apply the manifest resources +func (r *Reconciler) install(manifest *mf.Manifest, instance *servingv1alpha1.KnativeServing) error { + r.Logger.Debug("Installing manifest") + if err := manifest.ApplyAll(); err != nil { instance.Status.MarkInstallFailed(err.Error()) return err } @@ -193,7 +181,7 @@ func (r *Reconciler) apply(instance *servingv1alpha1.KnativeServing) error { } // Check for all deployments available -func (r *Reconciler) checkDeployments(instance *servingv1alpha1.KnativeServing) error { +func (r *Reconciler) checkDeployments(manifest *mf.Manifest, instance *servingv1alpha1.KnativeServing) error { 
r.Logger.Debug("Checking deployments") defer r.updateStatus(instance) available := func(d *appsv1.Deployment) bool { @@ -204,7 +192,7 @@ func (r *Reconciler) checkDeployments(instance *servingv1alpha1.KnativeServing) } return false } - for _, u := range r.config.Resources { + for _, u := range manifest.Resources { if u.GetKind() == "Deployment" { deployment, err := r.KubeClientSet.AppsV1().Deployments(u.GetNamespace()).Get(u.GetName(), metav1.GetOptions{}) if err != nil { @@ -225,24 +213,24 @@ func (r *Reconciler) checkDeployments(instance *servingv1alpha1.KnativeServing) } // Delete obsolete resources from previous versions -func (r *Reconciler) deleteObsoleteResources(instance *servingv1alpha1.KnativeServing) error { +func (r *Reconciler) deleteObsoleteResources(manifest *mf.Manifest, instance *servingv1alpha1.KnativeServing) error { // istio-system resources from 0.3 resource := &unstructured.Unstructured{} resource.SetNamespace("istio-system") resource.SetName("knative-ingressgateway") resource.SetAPIVersion("v1") resource.SetKind("Service") - if err := r.config.Delete(resource, &metav1.DeleteOptions{}); err != nil { + if err := manifest.Delete(resource, &metav1.DeleteOptions{}); err != nil { return err } resource.SetAPIVersion("apps/v1") resource.SetKind("Deployment") - if err := r.config.Delete(resource, &metav1.DeleteOptions{}); err != nil { + if err := manifest.Delete(resource, &metav1.DeleteOptions{}); err != nil { return err } resource.SetAPIVersion("autoscaling/v1") resource.SetKind("HorizontalPodAutoscaler") - if err := r.config.Delete(resource, &metav1.DeleteOptions{}); err != nil { + if err := manifest.Delete(resource, &metav1.DeleteOptions{}); err != nil { return err } // config-controller from 0.5 @@ -250,7 +238,7 @@ func (r *Reconciler) deleteObsoleteResources(instance *servingv1alpha1.KnativeSe resource.SetName("config-controller") resource.SetAPIVersion("v1") resource.SetKind("ConfigMap") - if err := r.config.Delete(resource, 
&metav1.DeleteOptions{}); err != nil { + if err := manifest.Delete(resource, &metav1.DeleteOptions{}); err != nil { return err } return nil diff --git a/test/e2e/knativeservingdeployment_test.go b/test/e2e/knativeservingdeployment_test.go index 063c6300..05ad67e0 100644 --- a/test/e2e/knativeservingdeployment_test.go +++ b/test/e2e/knativeservingdeployment_test.go @@ -17,6 +17,7 @@ package e2e import ( "errors" + "fmt" "path/filepath" "runtime" "testing" @@ -27,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "knative.dev/pkg/test/logstream" + "knative.dev/serving-operator/pkg/apis/serving/v1alpha1" "knative.dev/serving-operator/test" "knative.dev/serving-operator/test/resources" ) @@ -55,6 +57,11 @@ func TestKnativeServingDeployment(t *testing.T) { knativeServingVerify(t, clients, names) }) + t.Run("configure", func(t *testing.T) { + knativeServingVerify(t, clients, names) + knativeServingConfigure(t, clients, names) + }) + // Delete the deployments one by one to see if they will be recreated. 
t.Run("restore", func(t *testing.T) { knativeServingVerify(t, clients, names) @@ -78,6 +85,47 @@ func knativeServingVerify(t *testing.T, clients *test.Clients, names test.Resour } +// knativeServingConfigure verifies that KnativeServing config is set properly +func knativeServingConfigure(t *testing.T, clients *test.Clients, names test.ResourceNames) { + // We'll arbitrarily choose the logging config + configKey := "logging" + configMapName := fmt.Sprintf("%s/config-%s", names.Namespace, configKey) + // Get the existing KS without any spec + ks, err := clients.KnativeServing().Get(names.KnativeServing, metav1.GetOptions{}) + // Add config to its spec + ks.Spec = v1alpha1.KnativeServingSpec{ + Config: map[string]map[string]string{ + configKey: map[string]string{ + "loglevel.controller": "debug", + }, + }, + } + // Update it + if ks, err = clients.KnativeServing().Update(ks); err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + // Verify the relevant configmap has been updated + err = resources.WaitForConfigMap(configMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + return m["loglevel.controller"] == "debug" + }) + if err != nil { + t.Fatal("The operator failed to update the configmap") + } + // Now remove the config from the spec and update + ks.Spec = v1alpha1.KnativeServingSpec{} + if ks, err = clients.KnativeServing().Update(ks); err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + // And verify the configmap entry is gone + err = resources.WaitForConfigMap(configMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + _, exists := m["loglevel.controller"] + return !exists + }) + if err != nil { + t.Fatal("The operator failed to revert the configmap") + } +} + // deploymentRecreation verify whether all the deployments for knative serving are able to recreate, when they are deleted. 
func deploymentRecreation(t *testing.T, clients *test.Clients, names test.ResourceNames) { dpList, err := clients.KubeClient.Kube.AppsV1().Deployments(names.Namespace).List(metav1.ListOptions{}) diff --git a/test/resources/knativeserving.go b/test/resources/knativeserving.go index 079252a1..a1501732 100644 --- a/test/resources/knativeserving.go +++ b/test/resources/knativeserving.go @@ -28,6 +28,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" "knative.dev/pkg/test/logging" "knative.dev/serving-operator/pkg/apis/serving/v1alpha1" servingv1alpha1 "knative.dev/serving-operator/pkg/client/clientset/versioned/typed/serving/v1alpha1" @@ -73,6 +75,18 @@ func CreateKnativeServing(clients servingv1alpha1.KnativeServingInterface, names return svc, err } +// WaitForConfigMap takes a condition function that evaluates ConfigMap data +func WaitForConfigMap(name string, client *kubernetes.Clientset, fn func(map[string]string) bool) error { + ns, cm, _ := cache.SplitMetaNamespaceKey(name) + return wait.PollImmediate(Interval, Timeout, func() (bool, error) { + cm, err := client.CoreV1().ConfigMaps(ns).Get(cm, metav1.GetOptions{}) + if err != nil { + return false, err + } + return fn(cm.Data), nil + }) +} + // IsKnativeServingReady will check the status conditions of the KnativeServing and return true if the KnativeServing is ready. 
func IsKnativeServingReady(s *v1alpha1.KnativeServing, err error) (bool, error) { return s.Status.IsReady(), err diff --git a/vendor/github.com/jcrossley3/manifestival/manifestival.go b/vendor/github.com/jcrossley3/manifestival/manifestival.go index 6445f397..ded162ef 100644 --- a/vendor/github.com/jcrossley3/manifestival/manifestival.go +++ b/vendor/github.com/jcrossley3/manifestival/manifestival.go @@ -34,7 +34,7 @@ type Manifestival interface { // Returns a copy of the resource from the api server, nil if not found Get(spec *unstructured.Unstructured) (*unstructured.Unstructured, error) // Transforms the resources within a Manifest - Transform(fns ...Transformer) error + Transform(fns ...Transformer) (*Manifest, error) } type Manifest struct { @@ -161,14 +161,18 @@ func (f *Manifest) ResourceInterface(spec *unstructured.Unstructured) (dynamic.R return f.client.Resource(mapping.Resource).Namespace(spec.GetNamespace()), nil } -// We need to preserve the top-level target keys, specifically -// 'metadata.resourceVersion', 'spec.clusterIP', and any existing -// entries in a ConfigMap's 'data' field. So we only overwrite fields -// set in our src resource. +// We need to preserve some target keys, specifically +// 'metadata.resourceVersion' and 'spec.clusterIP'. So we only +// overwrite fields set in our src resource. 
// TODO: Use Patch instead func UpdateChanged(src, tgt map[string]interface{}) bool { changed := false for k, v := range src { + // Special case for ConfigMaps + if k == "data" && !equality.Semantic.DeepEqual(v, tgt[k]) { + tgt[k], changed = v, true + continue + } if v, ok := v.(map[string]interface{}); ok { if tgt[k] == nil { tgt[k], changed = v, true diff --git a/vendor/github.com/jcrossley3/manifestival/transform.go b/vendor/github.com/jcrossley3/manifestival/transform.go index e43f5bd8..4480de44 100644 --- a/vendor/github.com/jcrossley3/manifestival/transform.go +++ b/vendor/github.com/jcrossley3/manifestival/transform.go @@ -18,20 +18,21 @@ type Owner interface { } // If an error occurs, no resources are transformed -func (f *Manifest) Transform(fns ...Transformer) error { +func (f *Manifest) Transform(fns ...Transformer) (*Manifest, error) { var results []unstructured.Unstructured for i := 0; i < len(f.Resources); i++ { spec := f.Resources[i].DeepCopy() for _, transform := range fns { - err := transform(spec) - if err != nil { - return err + if transform != nil { + err := transform(spec) + if err != nil { + return nil, err + } } } results = append(results, *spec) } - f.Resources = results - return nil + return &Manifest{Resources: results, client: f.client, mapper: f.mapper}, nil } // We assume all resources in the manifest live in the same namespace