diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md
index 81a95d44..041ed6af 100644
--- a/DEVELOPMENT.md
+++ b/DEVELOPMENT.md
@@ -1,14 +1,23 @@
 ## Run Locally
 
-Example on Minikube:
+Please refer to the [serving](https://github.com/knative/serving/blob/main/DEVELOPMENT.md) development guidelines for a list of required tools and settings.
+
+The following sets up the `autoscaler-keda` extension on a local Minikube cluster.
+
+**NOTE:** the initial version of this extension was tested on `1.14.0` using only `kourier` as the networking layer.
 
 ### Install Serving with KEDA support for HPA
 
-```
-MEMORY=${MEMORY:-40000}
-CPUS=${CPUS:-6}
-EXTRA_CONFIG="apiserver.enable-admission-plugins=\
+#### Set up Minikube
+
+**NOTE:** depending on your OS, the following commands may need edits:
+
+```bash
+$ MEMORY=${MEMORY:-40000}
+$ CPUS=${CPUS:-6}
+
+$ EXTRA_CONFIG="apiserver.enable-admission-plugins=\
 LimitRanger,\
 NamespaceExists,\
 NamespaceLifecycle,\
@@ -17,132 +26,189 @@ ServiceAccount,\
 DefaultStorageClass,\
 MutatingAdmissionWebhook"
 
-minikube start --driver=kvm2 --memory=$MEMORY --cpus=$CPUS \
+$ minikube start --driver=kvm2 --memory=$MEMORY --cpus=$CPUS \
   --kubernetes-version=v1.28.0 \
   --disk-size=30g \
   --extra-config="$EXTRA_CONFIG" \
   --extra-config=kubelet.authentication-token-webhook=true
+```
+
+#### Install cert-manager
+
+```bash
+$ kubectl apply -f ./third_party/cert-manager-latest/cert-manager.yaml
+```
+
+#### Install Knative Serving
+
+```bash
+$ kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.16.0/serving-crds.yaml
+$ kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.16.0/serving-core.yaml
+```
 
-kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.14.0/serving-crds.yaml
-kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.14.0/serving-core.yaml
-kubectl apply -f https://github.com/knative/net-kourier/releases/download/knative-v1.14.0/kourier.yaml
+#### Install and configure a networking layer
 
-# Install cert-manager
-kubectl apply -f ./third_party/cert-manager-latest/cert-manager.yaml
+Follow the
+[Knative installation doc](https://knative.dev/docs/admin/install/serving/install-serving-with-yaml/#install-a-networking-layer) for complete instructions on networking layers.
 
-kubectl patch configmap/config-network \
+**NOTE:** this documentation was tested only with `istio` and `kourier`.
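+
+Whichever layer you choose, you can verify afterwards which ingress class Serving is configured to use. A quick sanity check (an empty result means `ingress.class` was not set explicitly in `config-network` and the default, `istio`, applies):
+
+```bash
+$ kubectl get configmap config-network -n knative-serving \
+    -o jsonpath='{.data.ingress\.class}'
+```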
+
+##### Istio
+
+To set up the `autoscaler-keda` extension with `istio`, follow the [istio development instructions](./ISTIO_DOC.md).
+
+##### Kourier
+
+```bash
+$ kubectl apply -f https://github.com/knative/net-kourier/releases/download/knative-v1.16.0/kourier.yaml
+
+$ kubectl patch configmap/config-network \
   -n knative-serving \
   --type merge \
   -p '{"data":{"ingress.class":"kourier.ingress.networking.knative.dev"}}'
+```
+
+Check that all pods are in a `Running` state:
 
-kubectl patch configmap/config-domain \
+```bash
+$ kubectl get po -n knative-serving
+NAME                                      READY   STATUS    RESTARTS   AGE
+activator-d66fd5dd8-875zt                 1/1     Running   0          2m41s
+autoscaler-6c7bf97997-clc7q               1/1     Running   0          2m41s
+controller-5b54cd98c-brd6l                1/1     Running   0          2m41s
+net-kourier-controller-5db85876d8-7hnr7   1/1     Running   0          2m30s
+webhook-56ffd84996-qskmt                  1/1     Running   0          2m41s
+```
+
+##### Configure knative domain
+
+```bash
+$ kubectl patch configmap/config-domain \
   --namespace knative-serving \
   --type merge \
   --patch '{"data":{"example.com":""}}'
+```
 
-kubectl get po -n knative-serving
+#### Install Prometheus and KEDA
 
-NAME                                      READY   STATUS    RESTARTS   AGE
-activator-58db57894b-ng4s5                1/1     Running   0          27m
-autoscaler-79d9fb98c-d79lg                1/1     Running   0          27m
-autoscaler-keda-hpa-88cdb8764-kncd6       1/1     Running   0          3m45s
-controller-d4645478c-ncqv5                1/1     Running   0          27m
-net-kourier-controller-6559c556d7-g8mcg   1/1     Running   0          27m
-webhook-dddf6fcff-k99gl                   1/1     Running   0          27m
-
-start minikube tunnel on another terminal
-
-# install Prometheus and KEDA
-
-cat ../values.yaml
-kube-state-metrics:
-  metricLabelsAllowlist:
-    - pods=[*]
-    - deployments=[app.kubernetes.io/name,app.kubernetes.io/component,app.kubernetes.io/instance]
-prometheus:
-  prometheusSpec:
-    serviceMonitorSelectorNilUsesHelmValues: false
-    podMonitorSelectorNilUsesHelmValues: false
-grafana:
-  sidecar:
-    dashboards:
-      enabled: true
-      searchNamespace: ALL
-prometheus-node-exporter:
-  hostRootFsMount:
-    enabled: false
-
-
-helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
-helm repo add kedacore https://kedacore.github.io/charts
-helm repo update
-
-helm install prometheus prometheus-community/kube-prometheus-stack -n default -f values.yaml
-helm install keda kedacore/keda --namespace keda --create-namespace
-
-$ kubectl get po
+```bash
+$ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+$ helm repo add kedacore https://kedacore.github.io/charts
+$ helm repo update
+
+$ helm install prometheus prometheus-community/kube-prometheus-stack -f values.yaml
+$ helm install keda kedacore/keda --namespace keda --create-namespace
+```
+
+Check that all pods are in a `Running` state:
+
+```bash
+$ kubectl get po -n default
 NAME                                                     READY   STATUS    RESTARTS   AGE
-alertmanager-prometheus-kube-prometheus-alertmanager-0   2/2     Running   0          117m
-metrics-test-00001-deployment-5f797f796d-r7tmw           2/2     Running   0          106m
-prometheus-grafana-d5679d5d7-q2pth                       3/3     Running   0          118m
-prometheus-kube-prometheus-operator-ffc85ddd8-g2wvx      1/1     Running   0          118m
-prometheus-kube-state-metrics-8759cbf44-jw49l            1/1     Running   0          118m
-prometheus-prometheus-kube-prometheus-prometheus-0       2/2     Running   0          117m
-prometheus-prometheus-node-exporter-q5qzv                1/1     Running   0          118m
+alertmanager-prometheus-kube-prometheus-alertmanager-0   2/2     Running   0          61s
+prometheus-grafana-69f9ccfd8d-svcpn                      3/3     Running   0          76s
+prometheus-kube-prometheus-operator-6f4fc4dcbd-gh2t2     1/1     Running   0          76s
+prometheus-kube-state-metrics-57c8464f66-487vm           1/1     Running   0          76s
+prometheus-prometheus-kube-prometheus-prometheus-0       2/2     Running   0          61s
+prometheus-prometheus-node-exporter-wkn2w                1/1     Running   0          76s
+```
+
+```bash
+$ kubectl get po -n keda
+NAME                                               READY   STATUS    RESTARTS      AGE
+keda-admission-webhooks-685d94fcff-krrgd           1/1     Running   0             57s
+keda-operator-65f5568c7b-rghfz                     1/1     Running   1 (43s ago)   57s
+keda-operator-metrics-apiserver-69c577c9cf-6bp5k   1/1     Running   0             57s
+```
+
+#### Install KEDA autoscaler
 
-# Install KEDA autoscaler
-ko apply -f config/
+```bash
+$ ko apply -f config/
 ```
 
-### Run a ksvc with Keda HPA support
+Check that the `autoscaler-keda` pod is in a `Running` state:
+
+```bash
+$ kubectl get po -n knative-serving
+NAME                                      READY   STATUS    RESTARTS   AGE
+activator-d66fd5dd8-z2rt8                 1/1     Running   0          12m
+autoscaler-6c7bf97997-ds4g9               1/1     Running   0          12m
+autoscaler-keda-5b5576c47b-gcjsf          1/1     Running   0          10m
+controller-5b54cd98c-dmht8                1/1     Running   0          12m
+net-kourier-controller-5db85876d8-nvcj5   1/1     Running   0          12m
+webhook-56ffd84996-65qb2                  1/1     Running   0          12m
+```
+
+#### Run a ksvc with Keda HPA support
 
 Apply the [service.yaml](./test/test_images/metrics-test/service.yaml) and wait for the service to be ready.
 
+```bash
+$ ko apply -f ./test/test_images/metrics-test/service.yaml
 ```
-ko apply -f ./test/test_images/metrics-test/service.yaml
+
+Check the `ksvc` and the created `scaledobject`:
+
+```bash
+$ kubectl get ksvc
+NAME           URL                                        LATESTCREATED        LATESTREADY          READY   REASON
+metrics-test   http://metrics-test.default.example.com   metrics-test-00001   metrics-test-00001   True
+
+$ kubectl get scaledobject
+NAME                 SCALETARGETKIND      SCALETARGETNAME                 MIN   MAX   READY   ACTIVE   FALLBACK   PAUSED    TRIGGERS   AUTHENTICATIONS   AGE
+metrics-test-00001   apps/v1.Deployment   metrics-test-00001-deployment   1     10    True    False    False      Unknown                                3m32s
+
+$ kubectl get hpa
+NAME                 REFERENCE                                   TARGETS     MINPODS   MAXPODS   REPLICAS   AGE
+metrics-test-00001   Deployment/metrics-test-00001-deployment   0/5 (avg)   1         10        1          5m27s
 ```
 
-### Let's create some traffic
+**NOTE:** since there is no traffic yet, the reported metric is `0`.
+
+#### Test your deployment
+
+Enable tunneling with minikube:
+
+```bash
+$ minikube tunnel
 ```
-for i in {1..1000}; do curl -H "Host: metrics-test.default.example.com " http://192.168.39.233:32370; done
-kubectl get ksvc
-NAME           URL                                        LATESTCREATED        LATESTREADY          READY   REASON
-metrics-test   http://metrics-test.default.example.com   metrics-test-00001   metrics-test-00001   True
+
+Get the `kourier` service external IP:
+
+```bash
+$ kubectl get svc kourier -n kourier-system
+NAME      TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
+kourier   LoadBalancer   10.110.53.103   127.0.0.1     80:30168/TCP,443:30836/TCP   16m
+```
 
-kubectl get hpa
-NAME                 REFERENCE                                   TARGETS        MINPODS   MAXPODS   REPLICAS   AGE
-metrics-test-00001   Deployment/metrics-test-00001-deployment   263m/5 (avg)   1         10        10         27m
+Let's create some traffic:
 
-kubectl get scaledobjects
-NAME                 SCALETARGETKIND      SCALETARGETNAME                 MIN   MAX   TRIGGERS     AUTHENTICATION   READY   ACTIVE   FALLBACK   PAUSED    AGE
-metrics-test-00001   apps/v1.Deployment   metrics-test-00001-deployment   1     10    prometheus   True             True    False    Unknown              27m
+```bash
+$ for i in {1..100000}; do curl -H "Host: metrics-test.default.example.com" http://127.0.0.1:80; done
+```
+
+The `ksvc` will scale based on the Prometheus metric.
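+
+To follow the scale-out while the loop runs, you can watch the HPA from a separate terminal (optional):
+
+```bash
+$ kubectl get hpa -w
+```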
+
+```bash
+$ kubectl get hpa
+NAME                 REFERENCE                                   TARGETS         MINPODS   MAXPODS   REPLICAS   AGE
+metrics-test-00001   Deployment/metrics-test-00001-deployment   8191m/5 (avg)   1         10        10         8m17s
+
+$ kubectl get po -n default -l app=metrics-test
+NAME                                             READY   STATUS    RESTARTS   AGE
+metrics-test-00001-deployment-554cfbdcdc-5dmck   2/2     Running   0          58s
+metrics-test-00001-deployment-554cfbdcdc-6njbs   2/2     Running   0          88s
+metrics-test-00001-deployment-554cfbdcdc-6xg4p   2/2     Running   0          43s
+metrics-test-00001-deployment-554cfbdcdc-7ts2b   2/2     Running   0          73s
+metrics-test-00001-deployment-554cfbdcdc-8z2zb   2/2     Running   0          88s
+metrics-test-00001-deployment-554cfbdcdc-g5x4v   2/2     Running   0          58s
+metrics-test-00001-deployment-554cfbdcdc-gz5c2   2/2     Running   0          8m44s
+metrics-test-00001-deployment-554cfbdcdc-hl2h8   2/2     Running   0          58s
+metrics-test-00001-deployment-554cfbdcdc-q7nk5   2/2     Running   0          58s
+metrics-test-00001-deployment-554cfbdcdc-rv6nx   2/2     Running   0          43s
+```
 
-kubectl get po
-NAME                                                     READY   STATUS    RESTARTS   AGE
-
-NAME                                                     READY   STATUS    RESTARTS   AGE
-alertmanager-prometheus-kube-prometheus-alertmanager-0   2/2     Running   0          120m
-metrics-test-00001-deployment-5f797f796d-29qrb           2/2     Running   0          19s
-metrics-test-00001-deployment-5f797f796d-86zmd           1/2     Running   0          4s
-metrics-test-00001-deployment-5f797f796d-gfdjl           2/2     Running   0          4s
-metrics-test-00001-deployment-5f797f796d-jms64           2/2     Running   0          19s
-metrics-test-00001-deployment-5f797f796d-kblz4           2/2     Running   0          50s
-metrics-test-00001-deployment-5f797f796d-kf4pd           2/2     Running   0          19s
-metrics-test-00001-deployment-5f797f796d-r7tmw           2/2     Running   0          108m
-metrics-test-00001-deployment-5f797f796d-rmqs5           2/2     Running   0          50s
-metrics-test-00001-deployment-5f797f796d-t5mcq           2/2     Running   0          19s
-metrics-test-00001-deployment-5f797f796d-vd4jm           2/2     Running   0          50s
-prometheus-grafana-d5679d5d7-q2pth                       3/3     Running   0          120m
-prometheus-kube-prometheus-operator-ffc85ddd8-g2wvx      1/1     Running   0          120m
-prometheus-kube-state-metrics-8759cbf44-jw49l            1/1     Running   0          120m
-prometheus-prometheus-kube-prometheus-prometheus-0       2/2     Running   0          120m
-prometheus-prometheus-node-exporter-q5qzv                1/1     Running   0          120m
-```
-
-After traffic is send target replicas are increases to 5. The reason is that we have set a threshold for scaling to be 5 for that metric.
-Let's see what Prometheus reports during peak load (`kubectl port-forward -n default svc/prometheus-operated 9090:9090`:
+
+After traffic is sent, replicas are scaled up until the max value is reached.
+The reason is that the scaling threshold for that metric is set to `5`. During peak load, Prometheus reports the following (`kubectl port-forward -n default svc/prometheus-operated 9090:9090`):
 
 ![prom.png](prom.png)
 
@@ -157,20 +223,21 @@ The full configuration is shown next:
     autoscaling.knative.dev/query: "sum(rate(http_requests_total{}[1m]))"
 ```
 
-After some cooldown period replicas are terminated back to 1:
-```
-kubectl get po
-NAME                                                     READY   STATUS    RESTARTS   AGE
-
-NAME                                                     READY   STATUS        RESTARTS   AGE
-alertmanager-prometheus-kube-prometheus-alertmanager-0   2/2     Running       0          125m
-metrics-test-00001-deployment-5f797f796d-gfdjl           1/2     Terminating   0          5m42s
-metrics-test-00001-deployment-5f797f796d-r7tmw           2/2     Running       0          114m
-prometheus-grafana-d5679d5d7-q2pth                       3/3     Running       0          126m
-prometheus-kube-prometheus-operator-ffc85ddd8-g2wvx      1/1     Running       0          126m
-prometheus-kube-state-metrics-8759cbf44-jw49l            1/1     Running       0          126m
-prometheus-prometheus-kube-prometheus-prometheus-0       2/2     Running       0          125m
-prometheus-prometheus-node-exporter-q5qzv                1/1     Running       0          126m
+After some cooldown period, replicas are scaled back down to 1 (see [keda cooldownPeriod](https://keda.sh/docs/2.14/concepts/scaling-deployments/#cooldownperiod)):
+
+```bash
+$ kubectl get po -n default -l app=metrics-test
+NAME                                             READY   STATUS        RESTARTS   AGE
+metrics-test-00001-deployment-554cfbdcdc-5dmck   2/2     Terminating   0          12m
+metrics-test-00001-deployment-554cfbdcdc-6njbs   2/2     Terminating   0          12m
+metrics-test-00001-deployment-554cfbdcdc-6xg4p   2/2     Terminating   0          12m
+metrics-test-00001-deployment-554cfbdcdc-7ts2b   1/2     Terminating   0          12m
+metrics-test-00001-deployment-554cfbdcdc-8z2zb   2/2     Terminating   0          12m
+metrics-test-00001-deployment-554cfbdcdc-g5x4v   2/2     Terminating   0          12m
+metrics-test-00001-deployment-554cfbdcdc-gz5c2   2/2     Running       0          20m
+metrics-test-00001-deployment-554cfbdcdc-hl2h8   2/2     Terminating   0          12m
+metrics-test-00001-deployment-554cfbdcdc-q7nk5   2/2     Terminating   0          12m
+metrics-test-00001-deployment-554cfbdcdc-rv6nx   1/2     Terminating   0          12m
 ```
 
 ### Bring Your Own ScaledObject
diff --git a/ISTIO_DOC.md b/ISTIO_DOC.md
new file mode 100644
index 00000000..8708265a
--- /dev/null
+++ b/ISTIO_DOC.md
@@ -0,0 +1,143 @@
+# Istio Setup
+
+## Simple-Mode
+
+In simple-mode, `istio` acts only as the ingress to `knative`.
+
+Install `istio`:
+
+```bash
+$ kubectl apply -f https://github.com/knative/net-istio/releases/download/knative-v1.16.0/istio.yaml
+$ kubectl apply -f https://github.com/knative/net-istio/releases/download/knative-v1.16.0/net-istio.yaml
+```
+
+Check that all pods are in a `Running` state:
+
+```bash
+$ kubectl get po -n knative-serving
+NAME                                   READY   STATUS    RESTARTS   AGE
+activator-d66fd5dd8-9phqr              1/1     Running   0          15m
+autoscaler-6c7bf97997-rkd2n            1/1     Running   0          15m
+autoscaler-keda-7f87794cb7-g2wm8       1/1     Running   0          6m32s
+controller-5b54cd98c-bzgpt             1/1     Running   0          15m
+net-istio-controller-c9444c8ff-rwcp5   1/1     Running   0          11m
+net-istio-webhook-66b6b6444c-zpxjg     1/1     Running   0          11m
+webhook-56ffd84996-ck5tm               1/1     Running   0          15m
+```
+
+The testing reported in the [development instructions](./DEVELOPMENT.md) was also conducted for `istio` in simple-mode.
+If mesh-mode is not required, continue the setup as documented in the main instructions.
+
+## Mesh-Mode
+
+Make sure to review the section of the `knative` documentation about installing istio in mesh-mode: [Knative installation doc](https://knative.dev/docs/admin/install/serving/install-serving-with-yaml/#install-a-networking-layer).
+
+Brief instructions for developing this extension in mesh-mode are given below.
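+
+The steps below assume the mesh keeps istio's default `PERMISSIVE` mTLS mode. Once istio is installed, a quick way to check whether a mesh-wide `PeerAuthentication` policy overrides this default (no output means the default applies):
+
+```bash
+$ kubectl get peerauthentication --all-namespaces
+```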
+
+Install `istio`:
+
+```bash
+$ kubectl apply -f https://github.com/knative/net-istio/releases/download/knative-v1.16.0/istio.yaml
+$ kubectl apply -f https://github.com/knative/net-istio/releases/download/knative-v1.16.0/net-istio.yaml
+```
+
+### Add knative serving to the mesh
+
+Enable `istio-injection` in the `knative-serving` namespace:
+
+```bash
+$ kubectl label namespace knative-serving istio-injection=enabled
+```
+
+Restart all deployments in the `knative-serving` namespace:
+
+```bash
+$ kubectl rollout restart deploy -n knative-serving
+```
+
+Verify that the pods are now injected with the `istio` sidecar container (note the number of `READY` containers is `2`):
+
+```bash
+$ kubectl get po -n knative-serving
+NAME                                    READY   STATUS    RESTARTS      AGE
+activator-5754bdb79d-9gpn5              2/2     Running   0             91s
+autoscaler-57d66f69d9-cmv6j             2/2     Running   0             91s
+controller-f48959855-lhrwz              2/2     Running   1 (90s ago)   91s
+net-istio-controller-69858b66f7-qfnq8   1/1     Running   0             91s
+net-istio-webhook-5645569675-jvj2r      2/2     Running   0             91s
+webhook-778946f8-ftjps                  2/2     Running   0             91s
+```
+
+### Configure knative domain
+
+```bash
+$ kubectl patch configmap/config-domain \
+  --namespace knative-serving \
+  --type merge \
+  --patch '{"data":{"example.com":""}}'
+```
+
+### Install Prometheus and KEDA
+
+In `PERMISSIVE` mode (under the `knative-serving` namespace), both clear-text and encrypted traffic are allowed, with the latter preferred by `istio` when properly configured. The following instructions will not enable mutual TLS between `knative`, `keda`, and `prometheus`. If running in `STRICT` mode, `istio` provides [documentation](https://istio.io/latest/docs/ops/integrations/prometheus/#tls-settings) on how to set up mTLS between "data" plane pods for `prometheus`.
+
+Install `keda` and `prometheus` in their own namespaces:
+
+```bash
+$ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+$ helm repo add kedacore https://kedacore.github.io/charts
+$ helm repo update
+
+$ helm install prometheus prometheus-community/kube-prometheus-stack -f values.yaml --namespace prometheus --create-namespace
+$ helm install keda kedacore/keda --namespace keda --create-namespace
+```
+
+Check that all pods are in a `Running` state:
+
+```bash
+$ kubectl get po -n prometheus
+NAME                                                     READY   STATUS    RESTARTS   AGE
+alertmanager-prometheus-kube-prometheus-alertmanager-0   2/2     Running   0          75s
+prometheus-grafana-69f9ccfd8d-72xb8                      3/3     Running   0          87s
+prometheus-kube-prometheus-operator-6f4fc4dcbd-2hzqm     1/1     Running   0          87s
+prometheus-kube-state-metrics-57c8464f66-7h25x           1/1     Running   0          87s
+prometheus-prometheus-kube-prometheus-prometheus-0       2/2     Running   0          75s
+prometheus-prometheus-node-exporter-h7gp6                1/1     Running   0          87s
+```
+
+```bash
+$ kubectl get po -n keda
+NAME                                               READY   STATUS    RESTARTS      AGE
+keda-admission-webhooks-685d94fcff-kjvlw           1/1     Running   0             80s
+keda-operator-65f5568c7b-9kbc8                     1/1     Running   1 (73s ago)   80s
+keda-operator-metrics-apiserver-69c577c9cf-qxxhx   1/1     Running   0             80s
+```
+
+### Install KEDA autoscaler
+
+```bash
+$ ko apply -f config/
+```
+
+Patch the `config-autoscaler-keda` configmap to reflect the `prometheus` service in its own namespace:
+
+```bash
+$ kubectl patch configmap/config-autoscaler-keda -n knative-serving --type merge -p '{"data": { "autoscaler.keda.prometheus-address": "http://prometheus-operated.prometheus.svc:9090"}}'
+```
+
+### Run a ksvc with Keda HPA support in the istio mesh
+
+```bash
+$ ko apply -f ./test/test_images/metrics-test/service_istio_injected.yaml -- -n metrics-test-istio
+```
+
+This will create a new namespace `metrics-test-istio` with istio injection enabled.
+
+```bash
+$ kubectl get po -n metrics-test-istio
+NAME                                                   READY   STATUS    RESTARTS   AGE
+metrics-test-istio-00001-deployment-7d57bbb8d8-86zsg   3/3     Running   0          77s
+```
+
+To generate traffic and test the deployment, follow the [development instructions](./DEVELOPMENT.md). The service will be accessible at `http://metrics-test-istio.metrics-test-istio.example.com`.
diff --git a/config/controller.yaml b/config/controller.yaml
index e1cbd3a1..43e4a180 100644
--- a/config/controller.yaml
+++ b/config/controller.yaml
@@ -48,7 +48,7 @@ spec:
       serviceAccountName: controller
       containers:
         - name: autoscaler-keda
-          imagePullPolicy: Always
+          imagePullPolicy: IfNotPresent
           # This is the Go import path for the binary that is containerized
           # and substituted here.
           image: ko://knative.dev/autoscaler-keda/cmd/autoscaler-keda
diff --git a/prom.png b/prom.png
index 38fe2245..80862cdf 100644
Binary files a/prom.png and b/prom.png differ
diff --git a/test/test_images/metrics-test/service.yaml b/test/test_images/metrics-test/service.yaml
index 2508ce54..738d4bb9 100644
--- a/test/test_images/metrics-test/service.yaml
+++ b/test/test_images/metrics-test/service.yaml
@@ -31,7 +31,7 @@ spec:
     spec:
       containers:
         - image: ko://knative.dev/autoscaler-keda/test/test_images/metrics-test/
-          imagePullPolicy: Always
+          imagePullPolicy: IfNotPresent
           ports:
             - name: http1
               containerPort: 8080
diff --git a/test/test_images/metrics-test/service_istio_injected.yaml b/test/test_images/metrics-test/service_istio_injected.yaml
new file mode 100644
index 00000000..21e5874e
--- /dev/null
+++ b/test/test_images/metrics-test/service_istio_injected.yaml
@@ -0,0 +1,75 @@
+# Copyright 2024 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    istio-injection: enabled
+  name: metrics-test-istio
+---
+apiVersion: serving.knative.dev/v1
+kind: Service
+metadata:
+  name: metrics-test-istio
+spec:
+  template:
+    metadata:
+      labels:
+        app: metrics-test-istio
+      annotations:
+        sidecar.istio.io/inject: "true"
+        autoscaling.knative.dev/minScale: "1"
+        autoscaling.knative.dev/maxScale: "10"
+        autoscaling.knative.dev/target: "5"
+        autoscaling.knative.dev/class: "hpa.autoscaling.knative.dev"
+        autoscaling.knative.dev/metric: "http_requests_total"
+        autoscaling.knative.dev/prometheus-query: "sum(rate(http_requests_total{}[1m]))"
+    spec:
+      containers:
+        - image: ko://knative.dev/autoscaler-keda/test/test_images/metrics-test/
+          imagePullPolicy: IfNotPresent
+          ports:
+            - name: http1
+              containerPort: 8080
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: metrics-test-istio-sm
+  labels:
+    name: metrics-test-istio-sm
+spec:
+  endpoints:
+    - port: metrics
+      scheme: http
+  namespaceSelector: {}
+  selector:
+    matchLabels:
+      name: metrics-test-istio-sm
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    name: metrics-test-istio-sm
+  name: metrics-test-istio-sm
+spec:
+  ports:
+    - name: metrics
+      port: 9096
+      protocol: TCP
+      targetPort: 9096
+  selector:
+    serving.knative.dev/service: metrics-test-istio
+  type: ClusterIP