From 6802561c67b48bfa70733cdfdaac15687469ed27 Mon Sep 17 00:00:00 2001 From: rho <13165182+rhoboat@users.noreply.github.com> Date: Thu, 17 Dec 2020 09:36:18 -0800 Subject: [PATCH] [k8s-services chart] Add support for custom resources (#86) --- .circleci/config.yml | 50 +++++++------ README.adoc | 8 ++ charts/k8s-service/README.md | 49 +++++++++--- .../templates/customresources.yaml | 12 +++ charts/k8s-service/values.yaml | 54 ++++++++++--- test/fixtures/custom_resources_values.yaml | 10 +++ .../multiple_custom_resources_values.yaml | 18 +++++ ...s_service_custom_resources_example_test.go | 75 +++++++++++++++++++ ..._service_custom_resources_template_test.go | 55 ++++++++++++++ ...s_service_service_monitor_template_test.go | 16 ++-- test/k8s_service_template_test.go | 4 +- 11 files changed, 299 insertions(+), 52 deletions(-) create mode 100644 charts/k8s-service/templates/customresources.yaml create mode 100644 test/fixtures/custom_resources_values.yaml create mode 100644 test/fixtures/multiple_custom_resources_values.yaml create mode 100644 test/k8s_service_custom_resources_example_test.go create mode 100644 test/k8s_service_custom_resources_template_test.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 533e9a33..d9125482 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -119,27 +119,29 @@ workflows: version: 2 test-and-deploy: jobs: - - setup: - filters: - tags: - only: /^v.*/ - context: - - Gruntwork Admin - - test: - requires: - - setup - filters: - tags: - only: /^v.*/ - context: - - Gruntwork Admin - - deploy: - requires: - - test - filters: - tags: - only: /^v.*/ - branches: - ignore: /.*/ - context: - - Gruntwork Admin + - setup: + filters: + tags: + only: /^v.*/ + context: + - Gruntwork Admin + + - test: + requires: + - setup + filters: + tags: + only: /^v.*/ + context: + - Gruntwork Admin + + - deploy: + requires: + - test + filters: + tags: + only: /^v.*/ + branches: + ignore: /.*/ + context: + - Gruntwork Admin diff --git 
a/README.adoc b/README.adoc index 30304a24..51936441 100644 --- a/README.adoc +++ b/README.adoc @@ -58,6 +58,9 @@ Library], a collection of reusable, battle-tested, production ready infrastructu * link:/core-concepts.md#how-do-you-run-applications-on-kubernetes[How do you run applications on Kubernetes?] * link:/core-concepts.md#what-is-helm[What is Helm?] * _https://www.manning.com/books/kubernetes-in-action[Kubernetes in Action]_: the best book we've found for getting up and running with Kubernetes. +* link:/charts/k8s-service/README.md#how-to-use-this-chart[How to use this chart?] +* link:/charts/k8s-service/README.md#what-resources-does-this-helm-chart-deploy[What resources does this Helm Chart deploy?] +* link:/charts/k8s-service/README.md#what-is-a-sidecar-container[What is a sidecar container?] === Repo organization @@ -87,11 +90,16 @@ If you want to deploy this repo in production, check out the following resources === Day-to-day operations +* link:/charts/k8s-service/README.md#how-do-i-deploy-additional-services-not-managed-by-the-chart[How do I deploy additional services not managed by the chart?] * link:/charts/k8s-service/README.md#how-do-i-expose-my-application-internally-to-the-cluster[How do I expose my application internally to the cluster?] * link:/charts/k8s-service/README.md#how-do-i-expose-my-application-externally-outside-of-the-cluster[How do I expose my application externally, outside of the cluster?] * link:/charts/k8s-service/README.md#how-do-i-deploy-a-worker-service[How do I deploy a worker service?] * link:/charts/k8s-service/README.md#how-do-i-check-the-status-of-the-rollout[How do I check the status of the rollout?] * link:/charts/k8s-service/README.md#how-do-i-set-and-share-configurations-with-the-application[How do I set and share configurations with the application?] * link:/charts/k8s-service/README.md#why-does-the-pod-have-a-prestop-hook-with-a-shutdown-delay[Why does the Pod have a preStop hook with a Shutdown Delay?] 
+* link:/charts/k8s-service/README.md#how-do-i-use-a-private-registry[How do I use a private registry?] +* link:/charts/k8s-service/README.md#how-do-i-verify-my-canary-deployment[How do I verify my canary deployment?] +* link:/charts/k8s-service/README.md#how-do-i-roll-back-a-canary-deployment[How do I roll back a canary deployment?] === Major changes diff --git a/charts/k8s-service/README.md b/charts/k8s-service/README.md index 0f06dde2..46589e97 100644 --- a/charts/k8s-service/README.md +++ b/charts/k8s-service/README.md @@ -23,6 +23,7 @@ If you're using the chart to deploy to [GKE](https://cloud.google.com/kubernetes * See the provided [values.yaml](./values.yaml) file for the required and optional configuration values that you can set on this chart. +back to [root README](/README.adoc#core-concepts) ## What resources does this Helm Chart deploy? @@ -30,11 +31,11 @@ The following resources will be deployed with this Helm Chart, depending on whic - `Deployment`: The main `Deployment` controller that will manage the application container image specified in the `containerImage` input value. -- Secondary `Deployment` for use as canary: An optional `Deployment` controller that will manage a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of the application container image specified in the `canary.containerImage` input value. This is useful for testing a new application tag, in parallel to your stable tag, prior to rolling the new tag out. Created only if you configure the `canary.containerImage` values (and set `canary.enabled = true`). +- Secondary `Deployment` for use as canary: An optional `Deployment` controller that will manage a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of the application container image specified in the `canary.containerImage` input value. This is useful for testing a new application tag, in parallel to your stable tag, prior to rolling the new tag out. 
Created only if you configure the `canary.containerImage` values (and set `canary.enabled = true`). - `Service`: The `Service` resource providing a stable endpoint that can be used to address to `Pods` created by the `Deployment` controller. Created only if you configure the `service` input (and set `service.enabled = true`). -- `ServiceMonitor`: The `ServiceMonitor` describes the set of targets to be monitored by Prometheus. Created only if you configure the service input and set `serviceMonitor.enabled = true`. +- `ServiceMonitor`: The `ServiceMonitor` describes the set of targets to be monitored by Prometheus. Created only if you configure the service input and set `serviceMonitor.enabled = true`. - `Ingress`: The `Ingress` resource providing host and path routing rules to the `Service` for the deployed `Ingress` controller in the cluster. Created only if you configure the `ingress` input (and set `ingress.enabled = true`). @@ -48,6 +49,17 @@ The following resources will be deployed with this Helm Chart, depending on whic - `ManagedCertificate`: The `ManagedCertificate` is a [GCP](https://cloud.google.com/) -specific resource that creates a Google Managed SSL certificate. Google-managed SSL certificates are provisioned, renewed, and managed for your domain names. Read more about Google-managed SSL certificates [here](https://cloud.google.com/load-balancing/docs/ssl-certificates#managed-certs). Created only if you configure the `google.managedCertificate` input (and set `google.managedCertificate.enabled = true` and `google.managedCertificate.domainName = your.domain.name`). +back to [root README](/README.adoc#core-concepts) + +## How do I deploy additional services not managed by the chart? + +You can create custom Kubernetes resources, that are not directly managed by the chart, within the `customResources` +key. You provide each resource manifest directly as a value under `customResources.resources` and set +`customResources.enabled` to `true`. 
For examples of custom resources, take a look at the examples in +[test/fixtures/custom_resources_values.yaml](../../test/fixtures/custom_resources_values.yaml) and +[test/fixtures/multiple_custom_resources_values.yaml](../../test/fixtures/multiple_custom_resources_values.yaml). + +back to [root README](/README.adoc#day-to-day-operations) ## How do I expose my application internally to the cluster? @@ -165,6 +177,7 @@ Note that DNS does not resolve ports, so in this case, you will have to know whi `edge-service-nginx.default.svc.cluster.local:80`. However, like the `Service` name, this should be predictable since it is specified in the Helm Chart input value. +back to [root README](/README.adoc#day-to-day-operations) ## How do I expose my application externally, outside of the cluster? @@ -490,6 +503,7 @@ ingress: The `/*` rule which routes to port 3000 will always be used even when accessing the path `/app` because it will be evaluated first when routing requests. +back to [root README](/README.adoc#day-to-day-operations) ## How do I deploy a worker service? @@ -511,6 +525,7 @@ service: This will override the default settings such that only the `Deployment` resource is created, with no ports exposed on the container. +back to [root README](/README.adoc#day-to-day-operations) ## How do I check the status of the rollout? @@ -629,6 +644,7 @@ Events: This will output detailed information about the `Pod`, including an event log. In this case, the roll out failed because there is not enough capacity in the cluster to schedule the `Pod`. +back to [root README](/README.adoc#day-to-day-operations) ## How do I set and share configurations with the application? @@ -915,6 +931,7 @@ approach: - Storing sensitive configuration values +back to [root README](/README.adoc#day-to-day-operations) ## How do you update the application to a new version? 
@@ -966,9 +983,9 @@ Note that certain changes will lead to a replacement of the `Deployment` resourc `applicationName` will cause the `Deployment` resource to be deleted, and then created. This can lead to down time because the resources are replaced in an uncontrolled fashion. -## How do I create a canary deployment? +## How do I create a canary deployment? -You may optionally configure a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of an arbitrary tag that will run as an individual deployment behind your configured service. This is useful for ensuring a new application tag runs without issues prior to fully rolling it out. +You may optionally configure a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of an arbitrary tag that will run as an individual deployment behind your configured service. This is useful for ensuring a new application tag runs without issues prior to fully rolling it out. To configure a canary deployment, set `canary.enabled = true` and define the `containerImage` values. Typically, you will want to specify the tag of your next release candidate: @@ -977,13 +994,15 @@ canary: enabled: true containerImage: repository: nginx - tag: 1.15.9 + tag: 1.15.9 ``` Once deployed, your service will route traffic across both your stable and canary deployments, allowing you to monitor for and catch any issues early. -## How do I verify my canary deployment? +back to [root README](/README.adoc#major-changes) -Canary deployment pods have the same name as your stable deployment pods, with the additional `-canary` appended to the end, like so: +## How do I verify my canary deployment? 
+ +Canary deployment pods have the same name as your stable deployment pods, with the additional `-canary` appended to the end, like so: ```bash $ kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" @@ -994,9 +1013,11 @@ edge-service-nginx-844c978df7-rdsr8 0/1 Pending 0 52s edge-service-nginx-canary-844c978df7-bsr8 0/1 Pending 0 52s ``` -Therefore, in this example, you could monitor your canary by running `kubectl logs -f edge-service-nginx-canary-844c978df7-bsr8` +Therefore, in this example, you could monitor your canary by running `kubectl logs -f edge-service-nginx-canary-844c978df7-bsr8` + +back to [root README](/README.adoc#day-to-day-operations) -## How do I roll back a canary deployment? +## How do I roll back a canary deployment? Update your values.yaml file, setting `canary.enabled = false` and then upgrade your helm installation: @@ -1005,6 +1026,8 @@ $ helm upgrade -f values.yaml edge-service gruntwork/k8s-service ``` Following this update, Kubernetes will determine that your canary deployment is no longer desired and will delete it. +back to [root README](/README.adoc#day-to-day-operations) + ## How do I ensure a minimum number of Pods are available across node maintenance? Sometimes, you may want to ensure that a specific number of `Pods` are always available during [voluntary @@ -1019,6 +1042,8 @@ and in [the official documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#how-disruption-budgets-work). +back to [root README](/README.adoc#major-changes) + ## Why does the Pod have a preStop hook with a Shutdown Delay? When a `Pod` is removed from a Kubernetes cluster, the control plane notifies all nodes to remove the `Pod` from @@ -1039,6 +1064,8 @@ You can read more about this topic in [our blog post Propagation"](https://blog.gruntwork.io/delaying-shutdown-to-wait-for-pod-deletion-propagation-445f779a8304). 
+back to [root README](/README.adoc#day-to-day-operations) + ## What is a sidecar container? In Kubernetes, `Pods` are one or more tightly coupled containers that are deployed together. The containers in the `Pod` @@ -1094,6 +1121,8 @@ container configured by the `containerImage`, `ports`, `livenessProbe`, etc inpu `livenessProbe` should be rendered directly within the `sideCarContainers` input value. +back to [root README](/README.adoc#core-concepts) + ## How do I use a private registry? To pull container images from a private registry, the Kubernetes cluster needs to be able to authenticate to the docker @@ -1127,3 +1156,5 @@ imagePullSecrets: You can learn more about using private registries with Kubernetes in [the official documentation](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry). + +back to [root README](/README.adoc#day-to-day-operations) diff --git a/charts/k8s-service/templates/customresources.yaml b/charts/k8s-service/templates/customresources.yaml new file mode 100644 index 00000000..9a334d4d --- /dev/null +++ b/charts/k8s-service/templates/customresources.yaml @@ -0,0 +1,12 @@ +{{- /* +If the operator configures the customResources input variable, then create custom resources based on the given +definitions. If a list of definitions is provided, separate them using the YAML separator so they can all be executed +from the same template file. +*/ -}} + +{{- if .Values.customResources.enabled -}} +{{- range $name, $value := .Values.customResources.resources }} +--- +{{ $value }} +{{- end }} +{{- end }} diff --git a/charts/k8s-service/values.yaml b/charts/k8s-service/values.yaml index 96c236d4..d2dda0a3 100644 --- a/charts/k8s-service/values.yaml +++ b/charts/k8s-service/values.yaml @@ -147,7 +147,7 @@ sideCarContainers: {} # for allowing you to find any issues early. # The expected keys of the canary spec are: # - enabled (bool) (required) : Whether or not the canary deployment should be created. 
If false, no canary deployment will be created. -# - containerImage (map) (required) : A map that specifies the application container and tag to be managed by the canary deployment. +# - containerImage (map) (required) : A map that specifies the application container and tag to be managed by the canary deployment. # This has the same structure as containerImage. # - replicaCount (int) : The number of pods that should be managed by the canary deployment. Defaults to 1 if unset. # @@ -373,7 +373,7 @@ additionalContainerEnv: {} configMaps: {} # persistentVolumes is a map that specifies PeristantVolumes that should be mounted on the pod. Each entry represents a -# persistent volume which should already exist within your cluster. They Key is the name of the persistent volume. +# persistent volume which should already exist within your cluster. They Key is the name of the persistent volume. # The value is also a map and has the following attributes: # - mountPath (string) (required) # : The path within the container upon which this volume should be mounted. @@ -469,8 +469,10 @@ imagePullSecrets: [] # - automountServiceAccountToken (bool) : Whether or not to automatically mount the ServiceAccount token as a volume # into the Pod. Note that this can be used to override the equivalent config # on the ServiceAccount. 
-# - create (bool) : Whether or not to create a service account with the desired name -# - annotations (map) : Annotations will add the provided map to the annotations for the service +# - create (bool) : Whether or not to create a service account with the desired name +# - annotations (map) : Annotations will add the provided map to the annotations for the service +# account created +# - labels (map) : Labels will add the provided map to the labels for the service +# account created # # The default config uses empty string to indicate that the default service account should be used and one shouldn't @@ -478,16 +480,17 @@ imagePullSecrets: [] serviceAccount: name: "" create: false + annotations: {} labels: {} # horizontalPodAutoscaler is a map that configures the Horizontal Pod Autoscaler information for this pod # The expected keys of hpa are: -# - enabled (bool) : Whether or not Horizontal Pod Autoscaler should be created, if false the +# - enabled (bool) : Whether or not Horizontal Pod Autoscaler should be created, if false the # Horizontal Pod Autoscaler will not be created -# - minReplicas (int) : The minimum amount of replicas allowed -# - maxReplicas (int) : The maximum amount of replicas allowed -# - avgCpuUtilization (int) : The target average CPU utilization to be used with the metrics -# - avgMemoryUtilization (int) : The target average Memory utilization to be used with the metrics +# - minReplicas (int) : The minimum amount of replicas allowed +# - maxReplicas (int) : The maximum amount of replicas allowed +# - avgCpuUtilization (int) : The target average CPU utilization to be used with the metrics +# - avgMemoryUtilization (int) : The target average Memory utilization to be used with the metrics # # The default config will not create the Horizontal Pod Autoscaler by setting enabled = false, the default values are # set so if enabled is true the horizontalPodAutoscaler has valid values. 
@@ -496,6 +499,39 @@ horizontalPodAutoscaler: minReplicas: 1 maxReplicas: 10 +# customResources is a map that lets you define Kubernetes resources you want installed and configured as part of this chart. +# The expected keys of customResources are: +# - enabled (bool) : Whether or not the provided custom resource definitions should be created. +# - resources (map) : A map of custom Kubernetes resources you want to install during the installation of the chart. +# +# NOTE: By default enabled = false, and no custom resources will be created. If you provide any resources, be sure to +# provide them as quoted using "|", and set enabled: true. +# +# The following example creates a custom ConfigMap and a Secret. +# +# EXAMPLE: +# +# customResources: +# enabled: true +# resources: +# custom_configmap: | +# apiVersion: v1 +# kind: ConfigMap +# metadata: +# name: example +# data: +# key: value +# custom_secret: | +# apiVersion: v1 +# kind: Secret +# metadata: +# name: example +# type: Opaque +# data: +# key: dmFsdWU= +customResources: + enabled: false + resources: {} #---------------------------------------------------------------------------------------------------------------------- # AWS SPECIFIC VALUES diff --git a/test/fixtures/custom_resources_values.yaml b/test/fixtures/custom_resources_values.yaml new file mode 100644 index 00000000..bade8d2f --- /dev/null +++ b/test/fixtures/custom_resources_values.yaml @@ -0,0 +1,10 @@ +customResources: + enabled: true + resources: + custom_configmap: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: example + data: + key: value diff --git a/test/fixtures/multiple_custom_resources_values.yaml b/test/fixtures/multiple_custom_resources_values.yaml new file mode 100644 index 00000000..58cfc4c0 --- /dev/null +++ b/test/fixtures/multiple_custom_resources_values.yaml @@ -0,0 +1,18 @@ +customResources: + enabled: true + resources: + custom_configmap: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: example-config-map + 
data: + foo: bar + custom_secret: | + apiVersion: v1 + kind: Secret + metadata: + name: example-secret + type: Opaque + data: + secret_text: dmFsdWU= diff --git a/test/k8s_service_custom_resources_example_test.go b/test/k8s_service_custom_resources_example_test.go new file mode 100644 index 00000000..df1f6dba --- /dev/null +++ b/test/k8s_service_custom_resources_example_test.go @@ -0,0 +1,75 @@ +// +build all integration + +// NOTE: We use build flags to differentiate between template tests and integration tests so that you can conveniently +// run just the template tests. See the test README for more information. + +package test + +import ( + "fmt" + "path/filepath" + "strings" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Test the base case of the k8s-service-custom-resources example. +// This test will: +// +// 1. Render a chart with multiple custom resources. +// 2. Run `kubectl apply` with the rendered chart. +// 3. Verify that the custom resources were deployed, by checking the k8s API. 
+func TestK8SServiceCustomResourcesExample(t *testing.T) { + t.Parallel() + + // Setup paths for testing the example chart + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-service")) + require.NoError(t, err) + + // Create a test namespace to deploy resources into, to avoid colliding with other tests + kubectlOptions := k8s.NewKubectlOptions("", "", "") + uniqueID := random.UniqueId() + testNamespace := fmt.Sprintf("k8s-service-custom-resources-%s", strings.ToLower(uniqueID)) + k8s.CreateNamespace(t, kubectlOptions, testNamespace) + defer k8s.DeleteNamespace(t, kubectlOptions, testNamespace) + kubectlOptions.Namespace = testNamespace + + // Use the values file in the fixtures + options := &helm.Options{ + ValuesFiles: []string{ + filepath.Join(helmChartPath, "linter_values.yaml"), + filepath.Join("fixtures", "multiple_custom_resources_values.yaml"), + }, + } + + // Render the chart + out := helm.RenderTemplate(t, options, helmChartPath, "customresources", []string{"templates/customresources.yaml"}) + + defer k8s.KubectlDeleteFromString(t, kubectlOptions, out) + + // Deploy a subset of the chart, just the ConfigMap and Secret + k8s.KubectlApplyFromString(t, kubectlOptions, out) + + // Verify that ConfigMap and Secret got created, but do nothing with the output that is returned. + // We only care that these functions do not error. 
+ k8s.GetSecret(t, kubectlOptions, "example-secret") + getConfigMap(t, kubectlOptions, "example-config-map") +} + +// getConfigMap should be implemented in Terratest +func getConfigMap(t *testing.T, options *k8s.KubectlOptions, name string) corev1.ConfigMap { + clientset, err := k8s.GetKubernetesClientFromOptionsE(t, options) + require.NoError(t, err) + + configMap, err := clientset.CoreV1().ConfigMaps(options.Namespace).Get(name, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, configMap) + + return *configMap +} diff --git a/test/k8s_service_custom_resources_template_test.go b/test/k8s_service_custom_resources_template_test.go new file mode 100644 index 00000000..7b7a2971 --- /dev/null +++ b/test/k8s_service_custom_resources_template_test.go @@ -0,0 +1,55 @@ +// +build all tpl + +// NOTE: We use build flags to differentiate between template tests and integration tests so that you can conveniently +// run just the template tests. See the test README for more information. + +package test + +import ( + "path/filepath" + "testing" + + "github.com/ghodss/yaml" + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" +) + +// Test that setting customResources.enabled = false will cause the helm template to not render any custom resources +func TestK8SServiceCustomResourcesEnabledFalseDoesNotCreateCustomResources(t *testing.T) { + t.Parallel() + + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-service")) + require.NoError(t, err) + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. + // We then use SetValues to override all the defaults. 
+ options := &helm.Options{ + ValuesFiles: []string{filepath.Join("..", "charts", "k8s-service", "linter_values.yaml")}, + SetValues: map[string]string{"customResources.enabled": "false"}, + } + _, err = helm.RenderTemplateE(t, options, helmChartPath, "customresources", []string{"templates/customresources.yaml"}) + require.Error(t, err) +} + +// Test that configuring a ConfigMap and a Secret will render correctly to something +func TestK8SServiceCustomResourcesEnabledCreatesCustomResources(t *testing.T) { + t.Parallel() + + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-service")) + require.NoError(t, err) + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. + options := &helm.Options{ + ValuesFiles: []string{ + filepath.Join("..", "charts", "k8s-service", "linter_values.yaml"), + filepath.Join("fixtures", "custom_resources_values.yaml"), + }, + } + out := helm.RenderTemplate(t, options, helmChartPath, "customresources", []string{"templates/customresources.yaml"}) + + // We render the output to a map to validate it + renderedConfigMap := corev1.ConfigMap{} + + require.NoError(t, yaml.Unmarshal([]byte(out), &renderedConfigMap)) +} diff --git a/test/k8s_service_service_monitor_template_test.go b/test/k8s_service_service_monitor_template_test.go index 73cc1d14..74cb4ad0 100644 --- a/test/k8s_service_service_monitor_template_test.go +++ b/test/k8s_service_service_monitor_template_test.go @@ -9,7 +9,7 @@ import ( "path/filepath" "testing" - promethues_operator_v1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" + prometheus_operator_v1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" "github.com/ghodss/yaml" "github.com/gruntwork-io/terratest/modules/helm" "github.com/stretchr/testify/assert" @@ -52,15 +52,15 @@ func TestK8SServiceServiceMonitorEnabledCreatesServiceMonitor(t *testing.T) { out := helm.RenderTemplate(t, options, helmChartPath, 
"servicemonitor", []string{"templates/servicemonitor.yaml"}) // We take the output and render it to a map to validate it is an empty yaml - rendered := promethues_operator_v1.ServiceMonitor{} + rendered := prometheus_operator_v1.ServiceMonitor{} require.NoError(t, yaml.Unmarshal([]byte(out), &rendered)) - require.Equal(t, len(rendered.Spec.Endpoints), 1) + require.Equal(t, 1, len(rendered.Spec.Endpoints)) // check the default endpoint properties defaultEndpoint := rendered.Spec.Endpoints[0] - assert.Equal(t, defaultEndpoint.Interval, "10s") - assert.Equal(t, defaultEndpoint.ScrapeTimeout, "10s") - assert.Equal(t, defaultEndpoint.Path, "/metrics") - assert.Equal(t, defaultEndpoint.Port, "http") - assert.Equal(t, defaultEndpoint.Scheme, "http") + assert.Equal(t, "10s", defaultEndpoint.Interval) + assert.Equal(t, "10s", defaultEndpoint.ScrapeTimeout) + assert.Equal(t, "/metrics", defaultEndpoint.Path) + assert.Equal(t, "http", defaultEndpoint.Port) + assert.Equal(t, "http", defaultEndpoint.Scheme) } diff --git a/test/k8s_service_template_test.go b/test/k8s_service_template_test.go index 58516ada..a9ee4fd1 100644 --- a/test/k8s_service_template_test.go +++ b/test/k8s_service_template_test.go @@ -722,7 +722,7 @@ func TestK8SServiceDeploymentAddingAdditionalLabels(t *testing.T) { second_custom_deployment_label_value := "second-custom-value" deployment := renderK8SServiceDeploymentWithSetValues(t, map[string]string{"additionalDeploymentLabels.first-label": first_custom_deployment_label_value, - "additionalDeploymentLabels.second-label":second_custom_deployment_label_value}) + "additionalDeploymentLabels.second-label": second_custom_deployment_label_value}) assert.Equal(t, deployment.Labels["first-label"], first_custom_deployment_label_value) assert.Equal(t, deployment.Labels["second-label"], second_custom_deployment_label_value) @@ -733,7 +733,7 @@ func TestK8SServicePodAddingAdditionalLabels(t *testing.T) { first_custom_pod_label_value := "first-custom-value" 
second_custom_pod_label_value := "second-custom-value" deployment := renderK8SServiceDeploymentWithSetValues(t, - map[string]string{"additionalPodLabels.first-label": first_custom_pod_label_value, + map[string]string{"additionalPodLabels.first-label": first_custom_pod_label_value, "additionalPodLabels.second-label": second_custom_pod_label_value}) assert.Equal(t, deployment.Spec.Template.Labels["first-label"], first_custom_pod_label_value)