diff --git a/channels-rke2.yaml b/channels-rke2.yaml index 94417e17a..221e3a5da 100644 --- a/channels-rke2.yaml +++ b/channels-rke2.yaml @@ -1645,7 +1645,7 @@ releases: repo: rancher-rke2-charts version: 1.15.100 rancher-vsphere-csi: - repo: rancher-charts + repo: rancher-rke2-charts version: 3.0.1-rancher101 rke2-calico: repo: rancher-rke2-charts @@ -1820,7 +1820,7 @@ releases: repo: rancher-rke2-charts version: 1.15.100 rancher-vsphere-csi: - repo: rancher-charts + repo: rancher-rke2-charts version: 3.0.1-rancher101 rke2-calico: repo: rancher-rke2-charts @@ -1870,3 +1870,44 @@ releases: repo: rancher-rke2-charts version: v4.0.2-build2024020802 featureVersions: *featureVersions-v1 + - version: v1.27.13+rke2r1 + minChannelServerVersion: v2.7.11-alpha1 + maxChannelServerVersion: v2.8.99 + serverArgs: &serverArgs-v1-27-13-rke2r1 + <<: *serverArgs-v1-27-11-rke2r1 + datastore-endpoint: + type: string + datastore-cafile: + type: string + datastore-certfile: + type: string + datastore-keyfile: + type: string + agentArgs: *agentArgs-v1-25-15-rke2r2 + charts: &charts-v1-27-13-rke2r1 + <<: *charts-v1-27-12-rke2r1 + rke2-flannel: + repo: rancher-rke2-charts + version: v0.25.102 + rke2-canal: + repo: rancher-rke2-charts + version: v3.27.3-build2024042301 + rke2-ingress-nginx: + repo: rancher-rke2-charts + version: 4.9.100 + rke2-calico: + repo: rancher-rke2-charts + version: v3.27.300 + rke2-cilium: + repo: rancher-rke2-charts + version: 1.15.400 + harvester-cloud-provider: + repo: rancher-rke2-charts + version: 0.2.300 + rancher-vsphere-csi: + repo: rancher-rke2-charts + version: 3.1.2-rancher400 + rke2-metrics-server: + repo: rancher-rke2-charts + version: 3.12.002 + featureVersions: *featureVersions-v1 diff --git a/channels.yaml b/channels.yaml index c4d00cabd..1a6b0316b 100644 --- a/channels.yaml +++ b/channels.yaml @@ -557,4 +557,13 @@ releases: maxChannelServerVersion: v2.7.99 serverArgs: *serverArgs-v7 agentArgs: *agentArgs-v5 - featureVersions: *featureVersions-v1 \ No newline at end of file + featureVersions: *featureVersions-v1 + - version: v1.27.13+k3s1 + minChannelServerVersion: v2.7.11-alpha1 + maxChannelServerVersion: v2.7.99 + serverArgs: + <<: *serverArgs-v7 + kine-tls: + type: boolean + agentArgs: *agentArgs-v5 + featureVersions: *featureVersions-v1 diff --git a/data/data.json b/data/data.json index e3b8881a3..1b273238e 100644 --- a/data/data.json +++ b/data/data.json @@ -13021,6 +13021,47 @@ "aciOvsContainer": "noiro/openvswitch:6.0.4.1.81c2369", "aciControllerContainer": "noiro/aci-containers-controller:6.0.4.1.81c2369" }, + "v1.27.13-rancher1-1": { + "etcd": "rancher/mirrored-coreos-etcd:v3.5.10", + "alpine": "rancher/rke-tools:v0.1.96", + "nginxProxy": "rancher/rke-tools:v0.1.96", + "certDownloader": "rancher/rke-tools:v0.1.96", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.96", + "kubedns": "rancher/mirrored-k8s-dns-kube-dns:1.22.28", + "dnsmasq": "rancher/mirrored-k8s-dns-dnsmasq-nanny:1.22.28", + "kubednsSidecar": "rancher/mirrored-k8s-dns-sidecar:1.22.28", + "kubednsAutoscaler": "rancher/mirrored-cluster-proportional-autoscaler:v1.8.9", + "coredns": "rancher/mirrored-coredns-coredns:1.10.1", + "corednsAutoscaler": "rancher/mirrored-cluster-proportional-autoscaler:v1.8.9", + "nodelocal": "rancher/mirrored-k8s-dns-node-cache:1.22.28", + "kubernetes": "rancher/hyperkube:v1.27.13-rancher1", + "flannel": "rancher/mirrored-flannel-flannel:v0.21.4", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher8", + "calicoNode": "rancher/mirrored-calico-node:v3.26.3", + "calicoCni": 
"rancher/calico-cni:v3.26.3-rancher1", + "calicoControllers": "rancher/mirrored-calico-kube-controllers:v3.26.3", + "calicoCtl": "rancher/mirrored-calico-ctl:v3.26.3", + "calicoFlexVol": "rancher/mirrored-calico-pod2daemon-flexvol:v3.26.3", + "canalNode": "rancher/mirrored-calico-node:v3.26.3", + "canalCni": "rancher/calico-cni:v3.26.3-rancher1", + "canalControllers": "rancher/mirrored-calico-kube-controllers:v3.26.3", + "canalFlannel": "rancher/mirrored-flannel-flannel:v0.21.4", + "canalFlexVol": "rancher/mirrored-calico-pod2daemon-flexvol:v3.26.3", + "weaveNode": "weaveworks/weave-kube:2.8.1", + "weaveCni": "weaveworks/weave-npc:2.8.1", + "podInfraContainer": "rancher/mirrored-pause:3.7", + "ingress": "rancher/nginx-ingress-controller:nginx-1.9.4-rancher1", + "ingressBackend": "rancher/mirrored-nginx-ingress-controller-defaultbackend:1.5-rancher1", + "ingressWebhook": "rancher/mirrored-ingress-nginx-kube-webhook-certgen:v20231011-8b53cabe0", + "metricsServer": "rancher/mirrored-metrics-server:v0.6.3", + "windowsPodInfraContainer": "rancher/mirrored-pause:3.7", + "aciCniDeployContainer": "noiro/cnideploy:6.0.4.1.81c2369", + "aciHostContainer": "noiro/aci-containers-host:6.0.4.1.81c2369", + "aciOpflexContainer": "noiro/opflex:6.0.4.1.81c2369", + "aciMcastContainer": "noiro/opflex:6.0.4.1.81c2369", + "aciOvsContainer": "noiro/openvswitch:6.0.4.1.81c2369", + "aciControllerContainer": "noiro/aci-containers-controller:6.0.4.1.81c2369" + }, "v1.27.6-rancher1-1": { "etcd": "rancher/mirrored-coreos-etcd:v3.5.7", "alpine": "rancher/rke-tools:v0.1.96", @@ -13358,7 +13399,8 @@ "\u003e=1.17.0-alpha \u003c1.20.15-rancher1-2": "coredns-v1.17", "\u003e=1.20.15-rancher1-2 \u003c1.21.0-rancher1-1": "coredns-v1.8.3-rancher2", "\u003e=1.21.0-rancher1-1 \u003c1.21.9-rancher1-2": "coredns-v1.8.3", - "\u003e=1.21.9-rancher1-2": "coredns-v1.8.3-rancher2", + "\u003e=1.21.9-rancher1-2 \u003c1.27.13-rancher1-1": "coredns-v1.8.3-rancher2", + "\u003e=1.27.13-rancher1-1 \u003c1.28.0-rancher1-1": "coredns-v1.8.3-rancher3", "\u003e=1.8.0-rancher0 \u003c1.16.0-alpha": "coredns-v1.8" }, "flannel": { @@ -13485,6 +13527,7 @@ "coredns-v1.8": "\n---\n{{- if eq .RBACConfig \"rbac\"}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n addonmanager.kubernetes.io/mode: Reconcile\n name: system:coredns\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - services\n - pods\n - namespaces\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n annotations:\n rbac.authorization.kubernetes.io/autoupdate: \"true\"\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n addonmanager.kubernetes.io/mode: EnsureExists\n name: system:coredns\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:coredns\nsubjects:\n- kind: ServiceAccount\n name: coredns\n namespace: kube-system\n{{- end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n addonmanager.kubernetes.io/mode: EnsureExists\ndata:\n Corefile: |\n .:53 {\n errors\n health\n kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ \"in-addr.arpa ip6.arpa\" }}{{ end }} {\n pods 
insecure\n upstream\n fallthrough in-addr.arpa ip6.arpa\n ttl 30\n }\n prometheus :9153\n\t{{- if .UpstreamNameservers }}\n forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}\n\t{{- else }}\n forward . \"/etc/resolv.conf\"\n\t{{- end }}\n cache 30\n loop\n reload\n loadbalance\n }\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n kubernetes.io/name: \"CoreDNS\"\nspec:\n strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n seccomp.security.alpha.kubernetes.io/pod: 'docker/default'\n spec:\n priorityClassName: system-cluster-critical\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns\n{{- end }}\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n nodeSelector:\n beta.kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n containers:\n - name: coredns\n image: {{.CoreDNSImage}}\n imagePullPolicy: IfNotPresent\n resources:\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n args: [ \"-conf\", \"/etc/coredns/Corefile\" ]\n volumeMounts:\n - name: config-volume\n mountPath: /etc/coredns\n readOnly: true\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n - containerPort: 9153\n name: metrics\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /health\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /health\n port: 8080\n scheme: HTTP\n securityContext:\n allowPrivilegeEscalation: false\n capabilities:\n add:\n - NET_BIND_SERVICE\n drop:\n - all\n readOnlyRootFilesystem: true\n dnsPolicy: Default\n volumes:\n - name: config-volume\n configMap:\n name: coredns\n items:\n - key: Corefile\n path: Corefile\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n annotations:\n prometheus.io/port: \"9153\"\n prometheus.io/scrape: \"true\"\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n kubernetes.io/name: \"CoreDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: {{.ClusterDNSServer}}\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n - name: metrics\n port: 9153\n protocol: TCP\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n labels:\n k8s-app: coredns-autoscaler\nspec:\n selector:\n matchLabels:\n k8s-app: coredns-autoscaler\n template:\n metadata:\n labels:\n k8s-app: coredns-autoscaler\n spec:\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns-autoscaler\n{{- end }}\n nodeSelector:\n beta.kubernetes.io/os: linux\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: 
node-role.kubernetes.io/worker\n operator: Exists\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: autoscaler\n image: {{.CoreDNSAutoScalerImage}}\n resources:\n requests:\n cpu: \"20m\"\n memory: \"10Mi\"\n command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=coredns-autoscaler\n - --target=Deployment/coredns\n # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1}}\n{{end}}\n - --logtostderr=true\n - --v=2\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n labels:\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nrules:\n - apiGroups: [\"\"]\n resources: [\"nodes\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"replicationcontrollers/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"extensions\"]\n resources: [\"deployments/scale\", \"replicasets/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nsubjects:\n - kind: ServiceAccount\n name: coredns-autoscaler\n namespace: kube-system\nroleRef:\n kind: ClusterRole\n name: system:coredns-autoscaler\n apiGroup: rbac.authorization.k8s.io\n{{- end }}", "coredns-v1.8.3": "\n# Based on coredns/deployment/kubernetes/coredns.yaml.sed v1.8.3\n---\n{{- if eq .RBACConfig \"rbac\"}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - services\n - pods\n - namespaces\n verbs:\n - list\n - watch\n- apiGroups:\n - discovery.k8s.io\n resources:\n - endpointslices\n verbs:\n - list\n - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n annotations:\n rbac.authorization.kubernetes.io/autoupdate: \"true\"\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:coredns\nsubjects:\n- kind: ServiceAccount\n name: coredns\n namespace: kube-system\n{{- end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: coredns\n namespace: kube-system\ndata:\n Corefile: |\n .:53 {\n errors\n health {\n lameduck 5s\n }\n ready\n kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ \"in-addr.arpa ip6.arpa\" }}{{ end }} {\n pods insecure\n fallthrough in-addr.arpa ip6.arpa\n }\n prometheus :9153\n\t{{- if .UpstreamNameservers }}\n forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}\n\t{{- else }}\n forward . 
\"/etc/resolv.conf\"\n\t{{- end }}\n cache 30\n loop\n reload\n loadbalance\n } # STUBDOMAINS - Rancher specific change\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/name: \"CoreDNS\"\nspec:\n # replicas is not specified in upstream template, default is 1. \n # Will be tuned in real time if DNS horizontal auto-scaling is turned on.\n replicas: 1\n strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n seccomp.security.alpha.kubernetes.io/pod: 'docker/default'\n spec:\n # Rancher specific change\n priorityClassName: {{ .CoreDNSPriorityClassName | default \"system-cluster-critical\" }}\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns\n{{- end }}\n{{- if .Tolerations}}\n tolerations:\n{{ toYaml .Tolerations | indent 6}}\n{{- else }}\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n{{- end }}\n nodeSelector:\n beta.kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 100\n podAffinityTerm:\n labelSelector:\n matchExpressions:\n - key: k8s-app\n operator: In\n values: [\"kube-dns\"]\n topologyKey: kubernetes.io/hostname\n containers:\n - name: coredns\n image: {{.CoreDNSImage}}\n imagePullPolicy: IfNotPresent\n resources:\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n args: [ \"-conf\", \"/etc/coredns/Corefile\" ]\n volumeMounts:\n - name: config-volume\n mountPath: /etc/coredns\n readOnly: true\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n - containerPort: 9153\n name: metrics\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /health\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /ready\n port: 8181\n scheme: HTTP\n securityContext:\n allowPrivilegeEscalation: false\n capabilities:\n add:\n - NET_BIND_SERVICE\n drop:\n - all\n readOnlyRootFilesystem: true\n dnsPolicy: Default\n volumes:\n - name: config-volume\n configMap:\n name: coredns\n items:\n - key: Corefile\n path: Corefile\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n annotations:\n prometheus.io/port: \"9153\"\n prometheus.io/scrape: \"true\"\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: {{.ClusterDNSServer}}\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n - name: metrics\n port: 9153\n protocol: TCP\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n labels:\n k8s-app: coredns-autoscaler\nspec:\n selector:\n matchLabels:\n k8s-app: coredns-autoscaler\n template:\n metadata:\n labels:\n k8s-app: coredns-autoscaler\n spec:\n{{- if eq .RBACConfig 
\"rbac\"}}\n serviceAccountName: coredns-autoscaler\n{{- end }}\n# Rancher specific change\n{{- if .CoreDNSAutoscalerPriorityClassName }}\n priorityClassName: {{ .CoreDNSAutoscalerPriorityClassName }}\n{{- end }}\n nodeSelector:\n beta.kubernetes.io/os: linux\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n{{- if .Tolerations}}\n tolerations:\n{{ toYaml .Tolerations | indent 6}}\n{{- else }}\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n{{- end }}\n containers:\n - name: autoscaler\n image: {{.CoreDNSAutoScalerImage}}\n resources:\n requests:\n cpu: \"20m\"\n memory: \"10Mi\"\n command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=coredns-autoscaler\n - --target=Deployment/coredns\n # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1,\"preventSinglePointFailure\":true}}\n{{end}}\n - --nodelabels=node-role.kubernetes.io/worker=true,beta.kubernetes.io/os=linux\n - --logtostderr=true\n - --v=2\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nrules:\n - apiGroups: [\"\"]\n resources: [\"nodes\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"replicationcontrollers/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"extensions\",\"apps\"]\n resources: [\"deployments/scale\", \"replicasets/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nsubjects:\n - kind: ServiceAccount\n name: coredns-autoscaler\n namespace: kube-system\nroleRef:\n kind: ClusterRole\n name: system:coredns-autoscaler\n apiGroup: rbac.authorization.k8s.io\n{{- end }}\n", "coredns-v1.8.3-rancher2": "\n# Based on coredns/deployment/kubernetes/coredns.yaml.sed v1.8.3\n---\n{{- if eq .RBACConfig \"rbac\"}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - services\n - pods\n - namespaces\n verbs:\n - list\n - watch\n- apiGroups:\n - discovery.k8s.io\n resources:\n - endpointslices\n verbs:\n - list\n - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n annotations:\n rbac.authorization.kubernetes.io/autoupdate: \"true\"\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:coredns\nsubjects:\n- kind: ServiceAccount\n name: coredns\n namespace: kube-system\n{{- end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: coredns\n namespace: kube-system\ndata:\n Corefile: |\n .:53 {\n errors\n health {\n 
lameduck 5s\n }\n ready\n kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ \"in-addr.arpa ip6.arpa\" }}{{ end }} {\n pods insecure\n fallthrough in-addr.arpa ip6.arpa\n }\n prometheus :9153\n\t{{- if .UpstreamNameservers }}\n forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}\n\t{{- else }}\n forward . \"/etc/resolv.conf\"\n\t{{- end }}\n cache 30\n loop\n reload\n loadbalance\n } # STUBDOMAINS - Rancher specific change\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/name: \"CoreDNS\"\nspec:\n # replicas is not specified in upstream template, default is 1. \n # Will be tuned in real time if DNS horizontal auto-scaling is turned on.\n replicas: 1\n strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n seccomp.security.alpha.kubernetes.io/pod: 'docker/default'\n spec:\n # Rancher specific change\n priorityClassName: {{ .CoreDNSPriorityClassName | default \"system-cluster-critical\" }}\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns\n{{- end }}\n{{- if .Tolerations}}\n tolerations:\n{{ toYaml .Tolerations | indent 6}}\n{{- else }}\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n{{- end }}\n nodeSelector:\n beta.kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 100\n podAffinityTerm:\n labelSelector:\n matchExpressions:\n - key: k8s-app\n operator: In\n values: [\"kube-dns\"]\n topologyKey: kubernetes.io/hostname\n containers:\n - name: coredns\n image: {{.CoreDNSImage}}\n imagePullPolicy: IfNotPresent\n resources:\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n args: [ \"-conf\", \"/etc/coredns/Corefile\" ]\n volumeMounts:\n - name: config-volume\n mountPath: /etc/coredns\n readOnly: true\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n - containerPort: 9153\n name: metrics\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /health\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /ready\n port: 8181\n scheme: HTTP\n securityContext:\n allowPrivilegeEscalation: false\n capabilities:\n add:\n - NET_BIND_SERVICE\n drop:\n - all\n readOnlyRootFilesystem: true\n dnsPolicy: Default\n volumes:\n - name: config-volume\n configMap:\n name: coredns\n items:\n - key: Corefile\n path: Corefile\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n annotations:\n prometheus.io/port: \"9153\"\n prometheus.io/scrape: \"true\"\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: {{.ClusterDNSServer}}\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: 
TCP\n - name: metrics\n port: 9153\n protocol: TCP\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n labels:\n k8s-app: coredns-autoscaler\nspec:\n selector:\n matchLabels:\n k8s-app: coredns-autoscaler\n template:\n metadata:\n labels:\n k8s-app: coredns-autoscaler\n spec:\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns-autoscaler\n{{- end }}\n# Rancher specific change\n{{- if .CoreDNSAutoscalerPriorityClassName }}\n priorityClassName: {{ .CoreDNSAutoscalerPriorityClassName }}\n{{- end }}\n nodeSelector:\n beta.kubernetes.io/os: linux\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n{{- if .Tolerations}}\n tolerations:\n{{ toYaml .Tolerations | indent 6}}\n{{- else }}\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n{{- end }}\n containers:\n - name: autoscaler\n image: {{.CoreDNSAutoScalerImage}}\n resources:\n requests:\n cpu: \"20m\"\n memory: \"10Mi\"\n livenessProbe:\n failureThreshold: 5\n httpGet:\n path: /healthz\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 5\n readinessProbe:\n failureThreshold: 3\n httpGet:\n path: /healthz\n port: 8080\n scheme: HTTP\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 1\n command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=coredns-autoscaler\n - --target=Deployment/coredns\n # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1,\"preventSinglePointFailure\":true}}\n{{end}}\n - --nodelabels=node-role.kubernetes.io/worker=true,beta.kubernetes.io/os=linux\n - --logtostderr=true\n - --v=2\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nrules:\n - apiGroups: [\"\"]\n resources: [\"nodes\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"replicationcontrollers/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"extensions\",\"apps\"]\n resources: [\"deployments/scale\", \"replicasets/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nsubjects:\n - kind: ServiceAccount\n name: coredns-autoscaler\n namespace: kube-system\nroleRef:\n kind: ClusterRole\n name: system:coredns-autoscaler\n apiGroup: rbac.authorization.k8s.io\n{{- end }}\n", + "coredns-v1.8.3-rancher3": "\n# Based on coredns/deployment/kubernetes/coredns.yaml.sed v1.8.3\n---\n{{- if eq .RBACConfig \"rbac\"}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - services\n - 
pods\n - namespaces\n verbs:\n - list\n - watch\n- apiGroups:\n - discovery.k8s.io\n resources:\n - endpointslices\n verbs:\n - list\n - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n annotations:\n rbac.authorization.kubernetes.io/autoupdate: \"true\"\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:coredns\nsubjects:\n- kind: ServiceAccount\n name: coredns\n namespace: kube-system\n{{- end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: coredns\n namespace: kube-system\ndata:\n Corefile: |\n .:53 {\n errors\n health {\n lameduck 5s\n }\n ready\n kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ \"in-addr.arpa ip6.arpa\" }}{{ end }} {\n pods insecure\n fallthrough in-addr.arpa ip6.arpa\n }\n prometheus :9153\n\t{{- if .UpstreamNameservers }}\n forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}\n\t{{- else }}\n forward . \"/etc/resolv.conf\"\n\t{{- end }}\n cache 30\n loop\n reload\n loadbalance\n } # STUBDOMAINS - Rancher specific change\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/name: \"CoreDNS\"\nspec:\n # replicas is not specified in upstream template, default is 1. \n # Will be tuned in real time if DNS horizontal auto-scaling is turned on.\n strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n seccomp.security.alpha.kubernetes.io/pod: 'docker/default'\n spec:\n # Rancher specific change\n priorityClassName: {{ .CoreDNSPriorityClassName | default \"system-cluster-critical\" }}\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns\n{{- end }}\n{{- if .Tolerations}}\n tolerations:\n{{ toYaml .Tolerations | indent 6}}\n{{- else }}\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n{{- end }}\n nodeSelector:\n kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 100\n podAffinityTerm:\n labelSelector:\n matchExpressions:\n - key: k8s-app\n operator: In\n values: [\"kube-dns\"]\n topologyKey: kubernetes.io/hostname\n containers:\n - name: coredns\n image: {{.CoreDNSImage}}\n imagePullPolicy: IfNotPresent\n resources:\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n args: [ \"-conf\", \"/etc/coredns/Corefile\" ]\n volumeMounts:\n - name: config-volume\n mountPath: /etc/coredns\n readOnly: true\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n - containerPort: 9153\n name: metrics\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /health\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /ready\n port: 8181\n scheme: HTTP\n securityContext:\n 
allowPrivilegeEscalation: false\n capabilities:\n add:\n - NET_BIND_SERVICE\n drop:\n - all\n readOnlyRootFilesystem: true\n seccompProfile:\n type: RuntimeDefault\n dnsPolicy: Default\n volumes:\n - name: config-volume\n configMap:\n name: coredns\n items:\n - key: Corefile\n path: Corefile\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n annotations:\n prometheus.io/port: \"9153\"\n prometheus.io/scrape: \"true\"\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: {{.ClusterDNSServer}}\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n - name: metrics\n port: 9153\n protocol: TCP\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n labels:\n k8s-app: coredns-autoscaler\nspec:\n selector:\n matchLabels:\n k8s-app: coredns-autoscaler\n template:\n metadata:\n labels:\n k8s-app: coredns-autoscaler\n spec:\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns-autoscaler\n{{- end }}\n# Rancher specific change\n{{- if .CoreDNSAutoscalerPriorityClassName }}\n priorityClassName: {{ .CoreDNSAutoscalerPriorityClassName }}\n{{- end }}\n nodeSelector:\n kubernetes.io/os: linux\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n{{- if .Tolerations}}\n tolerations:\n{{ toYaml .Tolerations | indent 6}}\n{{- else }}\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n{{- end }}\n containers:\n - name: autoscaler\n image: {{.CoreDNSAutoScalerImage}}\n resources:\n requests:\n cpu: \"20m\"\n memory: \"10Mi\"\n livenessProbe:\n failureThreshold: 5\n httpGet:\n path: /healthz\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 5\n readinessProbe:\n failureThreshold: 3\n httpGet:\n path: /healthz\n port: 8080\n scheme: HTTP\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 1\n command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=coredns-autoscaler\n - --target=Deployment/coredns\n # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1,\"preventSinglePointFailure\":true}}\n{{end}}\n - --nodelabels=node-role.kubernetes.io/worker=true,kubernetes.io/os=linux\n - --logtostderr=true\n - --v=2\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nrules:\n - apiGroups: [\"\"]\n resources: [\"nodes\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"replicationcontrollers/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"extensions\",\"apps\"]\n resources: [\"deployments/scale\", \"replicasets/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: 
rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nsubjects:\n - kind: ServiceAccount\n name: coredns-autoscaler\n namespace: kube-system\nroleRef:\n kind: ClusterRole\n name: system:coredns-autoscaler\n apiGroup: rbac.authorization.k8s.io\n{{- end }}\n", "flannel-v0.14.0": "\n---\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: psp.flannel.unprivileged\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default\n seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default\n apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default\n apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default\nspec:\n privileged: false\n volumes:\n - configMap\n - secret\n - emptyDir\n - hostPath\n allowedHostPaths:\n - pathPrefix: \"/etc/cni/net.d\"\n - pathPrefix: \"/etc/kube-flannel\"\n - pathPrefix: \"/run/flannel\"\n readOnlyRootFilesystem: false\n # Users and groups\n runAsUser:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n fsGroup:\n rule: RunAsAny\n # Privilege Escalation\n allowPrivilegeEscalation: false\n defaultAllowPrivilegeEscalation: false\n # Capabilities\n allowedCapabilities: ['NET_ADMIN', 'NET_RAW']\n defaultAddCapabilities: []\n requiredDropCapabilities: []\n # Host namespaces\n hostPID: false\n hostIPC: false\n hostNetwork: true\n hostPorts:\n - min: 0\n max: 65535\n # SELinux\n seLinux:\n # SELinux is unused in CaaSP\n rule: 'RunAsAny'\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames: ['psp.flannel.unprivileged']\n- apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: flannel\n namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: flannel\n namespace: kube-system\n{{- end}}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: kube-flannel-cfg\n namespace: kube-system\n labels:\n tier: node\n app: flannel\ndata:\n cni-conf.json: |\n {\n \"name\": \"cbr0\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"flannel\",\n \"delegate\": {\n \"forceAddress\": true,\n \"hairpinMode\": true,\n \"isDefaultGateway\": true\n }\n },\n {\n \"type\": \"portmap\",\n \"capabilities\": {\n \"portMappings\": true\n }\n }\n ]\n }\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: kube-flannel\n namespace: kube-system\n labels:\n tier: node\n k8s-app: flannel\nspec:\n selector:\n matchLabels:\n k8s-app: flannel\n template:\n metadata:\n labels:\n tier: node\n k8s-app: flannel\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: kubernetes.io/os\n operator: In\n values:\n - linux\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end 
}}\n{{end}}\n hostNetwork: true\n# Rancher specific change\n{{- if .KubeFlannelPriorityClassName }}\n priorityClassName: {{ .KubeFlannelPriorityClassName }}\n{{- end }}\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n priorityClassName: system-node-critical\n tolerations:\n {{- if ge .ClusterVersion \"v1.12\" }}\n - operator: Exists\n effect: NoSchedule\n - operator: Exists\n effect: NoExecute\n {{- else }}\n - key: node-role.kubernetes.io/controlplane\n operator: Exists\n effect: NoSchedule\n - key: node-role.kubernetes.io/etcd\n operator: Exists\n effect: NoExecute\n {{- end }}\n serviceAccountName: flannel\n containers:\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: kube-flannel-cfg\n key: cni-conf.json\n - name: CNI_CONF_NAME\n value: \"10-flannel.conflist\"\n volumeMounts:\n - name: cni\n mountPath: /host/etc/cni/net.d\n - name: host-cni-bin\n mountPath: /host/opt/cni/bin/\n - name: kube-flannel\n image: {{.Image}}\n command:\n - /opt/bin/flanneld\n args:\n - --ip-masq\n - --kube-subnet-mgr\n {{- if .FlannelInterface}}\n - --iface={{.FlannelInterface}}\n {{end}}\n resources:\n requests:\n cpu: \"100m\"\n memory: \"50Mi\"\n limits:\n cpu: \"100m\"\n memory: \"50Mi\"\n securityContext:\n seLinuxOptions:\n type: rke_network_t\n privileged: false\n capabilities:\n add: [\"NET_ADMIN\", \"NET_RAW\"]\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n volumeMounts:\n - name: run\n mountPath: /run\n - name: cni\n mountPath: /etc/cni/net.d\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n - name: run\n hostPath:\n path: /run\n - name: cni\n hostPath:\n path: /etc/cni/net.d\n - name: flannel-cfg\n configMap:\n name: kube-flannel-cfg\n - name: host-cni-bin\n hostPath:\n path: /opt/cni/bin\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 20%\n{{end}}\n", "flannel-v0.14.0-rancher2": "\n---\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: psp.flannel.unprivileged\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default\n seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default\n apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default\n apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default\nspec:\n privileged: false\n volumes:\n - configMap\n - secret\n - emptyDir\n - hostPath\n allowedHostPaths:\n - pathPrefix: \"/etc/cni/net.d\"\n - pathPrefix: \"/etc/kube-flannel\"\n - pathPrefix: \"/run/flannel\"\n readOnlyRootFilesystem: false\n # Users and groups\n runAsUser:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n fsGroup:\n rule: RunAsAny\n # Privilege Escalation\n allowPrivilegeEscalation: false\n defaultAllowPrivilegeEscalation: false\n # Capabilities\n allowedCapabilities: ['NET_ADMIN', 'NET_RAW']\n defaultAddCapabilities: []\n requiredDropCapabilities: []\n # Host namespaces\n hostPID: false\n hostIPC: false\n hostNetwork: true\n hostPorts:\n - min: 0\n max: 65535\n # SELinux\n seLinux:\n # SELinux is unused in CaaSP\n rule: 'RunAsAny'\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRole\napiVersion: 
rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames: ['psp.flannel.unprivileged']\n- apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: flannel\n namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: flannel\n namespace: kube-system\n{{- end}}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: kube-flannel-cfg\n namespace: kube-system\n labels:\n tier: node\n app: flannel\ndata:\n cni-conf.json: |\n {\n \"name\": \"cbr0\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"flannel\",\n \"delegate\": {\n \"forceAddress\": true,\n \"hairpinMode\": true,\n \"isDefaultGateway\": true\n }\n },\n {\n \"type\": \"portmap\",\n \"capabilities\": {\n \"portMappings\": true\n }\n }\n ]\n }\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: kube-flannel\n namespace: kube-system\n labels:\n tier: node\n k8s-app: flannel\nspec:\n selector:\n matchLabels:\n k8s-app: flannel\n template:\n metadata:\n labels:\n tier: node\n k8s-app: flannel\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: kubernetes.io/os\n operator: In\n values:\n - linux\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n hostNetwork: true\n# Rancher specific change\n{{- if .KubeFlannelPriorityClassName }}\n priorityClassName: {{ .KubeFlannelPriorityClassName }}\n{{- end }}\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n priorityClassName: system-node-critical\n tolerations:\n {{- if ge .ClusterVersion \"v1.12\" }}\n - operator: Exists\n effect: NoSchedule\n - operator: Exists\n effect: NoExecute\n {{- else }}\n - key: node-role.kubernetes.io/controlplane\n operator: Exists\n effect: NoSchedule\n - key: node-role.kubernetes.io/etcd\n operator: Exists\n effect: NoExecute\n {{- end }}\n serviceAccountName: flannel\n containers:\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: kube-flannel-cfg\n key: cni-conf.json\n - name: CNI_CONF_NAME\n value: \"10-flannel.conflist\"\n volumeMounts:\n - name: cni\n mountPath: /host/etc/cni/net.d\n - name: host-cni-bin\n mountPath: /host/opt/cni/bin/\n - name: kube-flannel\n image: {{.Image}}\n command:\n - /opt/bin/flanneld\n args:\n - --ip-masq\n - --kube-subnet-mgr\n {{- if .FlannelInterface}}\n - --iface={{.FlannelInterface}}\n {{end}}\n resources:\n requests:\n cpu: \"100m\"\n memory: \"50Mi\"\n securityContext:\n seLinuxOptions:\n type: rke_network_t\n privileged: false\n capabilities:\n add: [\"NET_ADMIN\", \"NET_RAW\"]\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n 
fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n volumeMounts:\n - name: run\n mountPath: /run\n - name: cni\n mountPath: /etc/cni/net.d\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n - name: run\n hostPath:\n path: /run\n - name: cni\n hostPath:\n path: /etc/cni/net.d\n - name: flannel-cfg\n configMap:\n name: kube-flannel-cfg\n - name: host-cni-bin\n hostPath:\n path: /opt/cni/bin\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 20%\n{{end}}\n", "flannel-v0.19.2": "\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n- apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: flannel\n namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: flannel\n namespace: kube-system\n{{- end}}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: kube-flannel-cfg\n namespace: kube-system\n labels:\n tier: node\n app: flannel\ndata:\n cni-conf.json: |\n {\n \"name\": \"cbr0\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"flannel\",\n \"delegate\": {\n \"forceAddress\": true,\n \"hairpinMode\": true,\n \"isDefaultGateway\": true\n }\n },\n {\n \"type\": \"portmap\",\n \"capabilities\": {\n \"portMappings\": true\n }\n }\n ]\n }\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: kube-flannel\n namespace: kube-system\n labels:\n tier: node\n k8s-app: flannel\nspec:\n selector:\n matchLabels:\n k8s-app: flannel\n template:\n metadata:\n labels:\n tier: node\n k8s-app: flannel\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: kubernetes.io/os\n operator: In\n values:\n - linux\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n hostNetwork: true\n# Rancher specific change\n priorityClassName: {{ .KubeFlannelPriorityClassName | default \"system-node-critical\" }}\n tolerations:\n {{- if ge .ClusterVersion \"v1.12\" }}\n - operator: Exists\n effect: NoSchedule\n - operator: Exists\n effect: NoExecute\n {{- else }}\n - key: node-role.kubernetes.io/controlplane\n operator: Exists\n effect: NoSchedule\n - key: node-role.kubernetes.io/etcd\n operator: Exists\n effect: NoExecute\n {{- end }}\n serviceAccountName: flannel\n containers:\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: kube-flannel-cfg\n key: cni-conf.json\n - name: CNI_CONF_NAME\n value: \"10-flannel.conflist\"\n volumeMounts:\n - name: cni\n mountPath: /host/etc/cni/net.d\n - name: host-cni-bin\n mountPath: /host/opt/cni/bin/\n - name: kube-flannel\n 
image: {{.Image}}\n command:\n - /opt/bin/flanneld\n args:\n - --ip-masq\n - --kube-subnet-mgr\n {{- if .FlannelInterface}}\n - --iface={{.FlannelInterface}}\n {{end}}\n resources:\n requests:\n cpu: \"100m\"\n memory: \"50Mi\"\n securityContext:\n seLinuxOptions:\n type: rke_network_t\n privileged: false\n capabilities:\n add: [\"NET_ADMIN\", \"NET_RAW\"]\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: EVENT_QUEUE_DEPTH\n value: \"5000\"\n volumeMounts:\n - name: run\n mountPath: /run/flannel\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n - name: xtables-lock\n mountPath: /run/xtables.lock\n volumes:\n - name: run\n hostPath:\n path: /run/flannel\n - name: host-cni-bin\n hostPath:\n path: /opt/cni/bin\n - name: cni\n hostPath:\n path: /etc/cni/net.d\n - name: flannel-cfg\n configMap:\n name: kube-flannel-cfg\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 20%\n{{end}}\n", @@ -14109,7 +14152,7 @@ }, "RKEDefaultK8sVersions": { "0.3": "v1.16.3-rancher1-1", - "default": "v1.27.12-rancher1-1" + "default": "v1.27.13-rancher1-1" }, "K8sVersionDockerInfo": { "1.10": [ @@ -25615,6 +25658,189 @@ } }, "version": "v1.27.12+k3s1" + }, + { + "agentArgs": { + "default-runtime": { + "type": "string" + }, + "disable-apiserver-lb": { + "type": "boolean" + }, + "disable-default-registry-endpoint": { + "type": "boolean" + }, + "docker": { + "default": false, + "type": "boolean" + }, + "flannel-conf": { + "type": "string" + }, + "flannel-iface": { + "type": "string" + }, + "kube-proxy-arg": { + "type": "array" + }, + "kubelet-arg": { + "type": "array" + }, + "pause-image": { + "type": "string" + }, + "protect-kernel-defaults": { + "default": false, + "type": "boolean" + }, + "resolv-conf": { + "type": "string" + }, + "selinux": { + "default": false, + "type": "boolean" + }, + "snapshotter": { + "type": "string" + }, + "system-default-registry": { + "type": "string" + }, + "vpn-auth": { + "type": "string" + }, + "vpn-auth-file": { + "type": "string" + } + }, + "featureVersions": { + "encryption-key-rotation": "2.0.0" + }, + "maxChannelServerVersion": "v2.7.99", + "minChannelServerVersion": "v2.7.11-alpha1", + "serverArgs": { + "cluster-cidr": { + "type": "string" + }, + "cluster-dns": { + "type": "string" + }, + "cluster-domain": { + "type": "string" + }, + "datastore-cafile": { + "type": "string" + }, + "datastore-certfile": { + "type": "string" + }, + "datastore-endpoint": { + "type": "string" + }, + "datastore-keyfile": { + "type": "string" + }, + "default-local-storage-path": { + "type": "string" + }, + "disable": { + "options": [ + "coredns", + "servicelb", + "traefik", + "local-storage", + "metrics-server" + ], + "type": "array" + }, + "disable-apiserver": { + "default": false, + "type": "boolean" + }, + "disable-cloud-controller": { + "default": false, + "type": "boolean" + }, + "disable-controller-manager": { + "default": false, + "type": "boolean" + }, + "disable-etcd": { + "default": false, + "type": "boolean" + }, + "disable-kube-proxy": { + "default": false, + "type": "boolean" + }, + "disable-network-policy": { + "default": false, + "type": "boolean" + }, + "disable-scheduler": { + "default": false, + "type": "boolean" + }, + "egress-selector-mode": { + "type": "string" + }, + 
"embedded-registry": { + "type": "boolean" + }, + "etcd-arg": { + "type": "array" + }, + "etcd-expose-metrics": { + "default": false, + "type": "boolean" + }, + "flannel-backend": { + "options": [ + "none", + "vxlan", + "ipsec", + "host-gw", + "wireguard", + "wireguard-native" + ], + "type": "enum" + }, + "helm-job-image": { + "type": "string" + }, + "kine-tls": { + "type": "boolean" + }, + "kube-apiserver-arg": { + "type": "array" + }, + "kube-cloud-controller-manager-arg": { + "type": "array" + }, + "kube-controller-manager-arg": { + "type": "array" + }, + "kube-scheduler-arg": { + "type": "array" + }, + "secrets-encryption": { + "default": false, + "type": "boolean" + }, + "service-cidr": { + "type": "string" + }, + "service-node-port-range": { + "type": "string" + }, + "tls-san": { + "type": "array" + }, + "tls-san-security": { + "type": "boolean" + } + }, + "version": "v1.27.13+k3s1" } ] }, @@ -41325,7 +41551,7 @@ "version": "1.5.100" }, "rancher-vsphere-csi": { - "repo": "rancher-charts", + "repo": "rancher-rke2-charts", "version": "3.0.1-rancher101" }, "rke2-calico": { @@ -42931,7 +43157,7 @@ "version": "1.5.100" }, "rancher-vsphere-csi": { - "repo": "rancher-charts", + "repo": "rancher-rke2-charts", "version": "3.0.1-rancher101" }, "rke2-calico": { @@ -43363,6 +43589,291 @@ } }, "version": "v1.27.12+rke2r1" + }, + { + "agentArgs": { + "audit-policy-file": { + "type": "string" + }, + "cloud-controller-manager-extra-env": { + "type": "array" + }, + "cloud-controller-manager-extra-mount": { + "type": "array" + }, + "cloud-provider-config": { + "type": "string" + }, + "cloud-provider-name": { + "default": null, + "nullable": true, + "options": [ + "aws", + "azure", + "gcp", + "rancher-vsphere", + "harvester", + "external" + ], + "type": "enum" + }, + "control-plane-resource-limits": { + "type": "string" + }, + "control-plane-resource-requests": { + "type": "string" + }, + "etcd-extra-env": { + "type": "array" + }, + "etcd-extra-mount": { + "type": "array" + }, + "kube-apiserver-extra-env": { + "type": "array" + }, + "kube-apiserver-extra-mount": { + "type": "array" + }, + "kube-controller-manager-extra-env": { + "type": "array" + }, + "kube-controller-manager-extra-mount": { + "type": "array" + }, + "kube-proxy-arg": { + "type": "array" + }, + "kube-proxy-extra-env": { + "type": "array" + }, + "kube-proxy-extra-mount": { + "type": "array" + }, + "kube-scheduler-extra-env": { + "type": "array" + }, + "kube-scheduler-extra-mount": { + "type": "array" + }, + "kubelet-arg": { + "type": "array" + }, + "profile": { + "nullable": true, + "options": [ + "cis", + "cis-1.23" + ], + "type": "enum" + }, + "protect-kernel-defaults": { + "default": false, + "type": "boolean" + }, + "resolv-conf": { + "type": "string" + }, + "selinux": { + "type": "bool" + }, + "system-default-registry": { + "type": "string" + } + }, + "charts": { + "harvester-cloud-provider": { + "repo": "rancher-rke2-charts", + "version": "0.2.300" + }, + "harvester-csi-driver": { + "repo": "rancher-rke2-charts", + "version": "0.1.1700" + }, + "rancher-vsphere-cpi": { + "repo": "rancher-rke2-charts", + "version": "1.7.001" + }, + "rancher-vsphere-csi": { + "repo": "rancher-rke2-charts", + "version": "3.1.2-rancher400" + }, + "rke2-calico": { + "repo": "rancher-rke2-charts", + "version": "v3.27.300" + }, + "rke2-calico-crd": { + "repo": "rancher-rke2-charts", + "version": "v3.27.002" + }, + "rke2-canal": { + "repo": "rancher-rke2-charts", + "version": "v3.27.3-build2024042301" + }, + "rke2-cilium": { + "repo": "rancher-rke2-charts", 
+ "version": "1.15.400" + }, + "rke2-coredns": { + "repo": "rancher-rke2-charts", + "version": "1.29.002" + }, + "rke2-flannel": { + "repo": "rancher-rke2-charts", + "version": "v0.25.102" + }, + "rke2-ingress-nginx": { + "repo": "rancher-rke2-charts", + "version": "4.9.100" + }, + "rke2-metrics-server": { + "repo": "rancher-rke2-charts", + "version": "3.12.002" + }, + "rke2-multus": { + "repo": "rancher-rke2-charts", + "version": "v4.0.2-build2024020802" + }, + "rke2-snapshot-controller": { + "repo": "rancher-rke2-charts", + "version": "1.7.202" + }, + "rke2-snapshot-controller-crd": { + "repo": "rancher-rke2-charts", + "version": "1.7.202" + }, + "rke2-snapshot-validation-webhook": { + "repo": "rancher-rke2-charts", + "version": "1.7.302" + } + }, + "featureVersions": { + "encryption-key-rotation": "2.0.0" + }, + "maxChannelServerVersion": "v2.8.99", + "minChannelServerVersion": "v2.7.11-alpha1", + "serverArgs": { + "audit-policy-file": { + "type": "string" + }, + "cluster-cidr": { + "type": "string" + }, + "cluster-dns": { + "type": "string" + }, + "cluster-domain": { + "type": "string" + }, + "cni": { + "default": "calico", + "options": [ + "canal", + "cilium", + "calico", + "flannel", + "multus,canal", + "multus,cilium", + "multus,calico" + ], + "type": "array" + }, + "container-runtime-endpoint": { + "type": "string" + }, + "datastore-cafile": { + "type": "string" + }, + "datastore-certfile": { + "type": "string" + }, + "datastore-endpoint": { + "type": "string" + }, + "datastore-keyfile": { + "type": "string" + }, + "disable": { + "options": [ + "rke2-coredns", + "rke2-ingress-nginx", + "rke2-metrics-server" + ], + "type": "array" + }, + "disable-cloud-controller": { + "type": "bool" + }, + "disable-kube-proxy": { + "default": false, + "type": "boolean" + }, + "disable-scheduler": { + "type": "bool" + }, + "egress-selector-mode": { + "type": "string" + }, + "etcd-arg": { + "type": "array" + }, + "etcd-expose-metrics": { + "default": false, + "type": "boolean" + }, + "etcd-image": { + "type": "string" + }, + "kube-apiserver-arg": { + "type": "array" + }, + "kube-apiserver-image": { + "type": "string" + }, + "kube-cloud-controller-manager-arg": { + "type": "array" + }, + "kube-controller-manager-arg": { + "type": "array" + }, + "kube-controller-manager-image": { + "type": "string" + }, + "kube-proxy-arg": { + "type": "array" + }, + "kube-scheduler-arg": { + "type": "array" + }, + "kube-scheduler-image": { + "type": "string" + }, + "kubelet-path": { + "type": "string" + }, + "pause-image": { + "type": "string" + }, + "runtime-image": { + "type": "string" + }, + "service-cidr": { + "type": "string" + }, + "service-node-port-range": { + "type": "string" + }, + "snapshotter": { + "type": "string" + }, + "tls-san": { + "type": "array" + }, + "tls-san-security": { + "type": "boolean" + } + }, + "version": "v1.27.13+rke2r1" } ] } diff --git a/pkg/rke/k8s_defaults.go b/pkg/rke/k8s_defaults.go index 72baa045e..663cc5ef0 100644 --- a/pkg/rke/k8s_defaults.go +++ b/pkg/rke/k8s_defaults.go @@ -128,7 +128,7 @@ func validateTemplateMatch() { for toTestRange, key := range pluginData { testRange, err := semver.ParseRange(toTestRange) if err != nil { - panic(fmt.Sprintf("range for %s not sem-ver %v %v", plugin, testRange, err)) + panic(fmt.Sprintf("range for %s not sem-ver %v %v", plugin, toTestRange, err)) } if testRange(toMatch) { // only one range should be matched diff --git a/pkg/rke/k8s_rke_system_images.go b/pkg/rke/k8s_rke_system_images.go index 04569c17b..862597948 100644 --- 
a/pkg/rke/k8s_rke_system_images.go +++ b/pkg/rke/k8s_rke_system_images.go @@ -10226,6 +10226,48 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages { WindowsPodInfraContainer: "rancher/mirrored-pause:3.7", Nodelocal: "rancher/mirrored-k8s-dns-node-cache:1.22.28", }, + // Enabled in Rancher v2.7.13 + "v1.27.13-rancher1-1": { + Etcd: "rancher/mirrored-coreos-etcd:v3.5.10", + Kubernetes: "rancher/hyperkube:v1.27.13-rancher1", + Alpine: "rancher/rke-tools:v0.1.96", + NginxProxy: "rancher/rke-tools:v0.1.96", + CertDownloader: "rancher/rke-tools:v0.1.96", + KubernetesServicesSidecar: "rancher/rke-tools:v0.1.96", + KubeDNS: "rancher/mirrored-k8s-dns-kube-dns:1.22.28", + DNSmasq: "rancher/mirrored-k8s-dns-dnsmasq-nanny:1.22.28", + KubeDNSSidecar: "rancher/mirrored-k8s-dns-sidecar:1.22.28", + KubeDNSAutoscaler: "rancher/mirrored-cluster-proportional-autoscaler:v1.8.9", + Flannel: "rancher/mirrored-flannel-flannel:v0.21.4", + FlannelCNI: "rancher/flannel-cni:v0.3.0-rancher8", + CalicoNode: "rancher/mirrored-calico-node:v3.26.3", + CalicoCNI: "rancher/calico-cni:v3.26.3-rancher1", + CalicoControllers: "rancher/mirrored-calico-kube-controllers:v3.26.3", + CalicoCtl: "rancher/mirrored-calico-ctl:v3.26.3", + CalicoFlexVol: "rancher/mirrored-calico-pod2daemon-flexvol:v3.26.3", + CanalNode: "rancher/mirrored-calico-node:v3.26.3", + CanalCNI: "rancher/calico-cni:v3.26.3-rancher1", + CanalControllers: "rancher/mirrored-calico-kube-controllers:v3.26.3", + CanalFlannel: "rancher/mirrored-flannel-flannel:v0.21.4", + CanalFlexVol: "rancher/mirrored-calico-pod2daemon-flexvol:v3.26.3", + WeaveNode: "weaveworks/weave-kube:2.8.1", + WeaveCNI: "weaveworks/weave-npc:2.8.1", + AciCniDeployContainer: "noiro/cnideploy:6.0.4.1.81c2369", + AciHostContainer: "noiro/aci-containers-host:6.0.4.1.81c2369", + AciOpflexContainer: "noiro/opflex:6.0.4.1.81c2369", + AciMcastContainer: "noiro/opflex:6.0.4.1.81c2369", + AciOpenvSwitchContainer: "noiro/openvswitch:6.0.4.1.81c2369", + AciControllerContainer: "noiro/aci-containers-controller:6.0.4.1.81c2369", + PodInfraContainer: "rancher/mirrored-pause:3.7", + Ingress: "rancher/nginx-ingress-controller:nginx-1.9.4-rancher1", + IngressBackend: "rancher/mirrored-nginx-ingress-controller-defaultbackend:1.5-rancher1", + IngressWebhook: "rancher/mirrored-ingress-nginx-kube-webhook-certgen:v20231011-8b53cabe0", + MetricsServer: "rancher/mirrored-metrics-server:v0.6.3", + CoreDNS: "rancher/mirrored-coredns-coredns:1.10.1", + CoreDNSAutoscaler: "rancher/mirrored-cluster-proportional-autoscaler:v1.8.9", + WindowsPodInfraContainer: "rancher/mirrored-pause:3.7", + Nodelocal: "rancher/mirrored-k8s-dns-node-cache:1.22.28", + }, // k8s version from 2.1.x release with old rke-tools to allow upgrade from 2.1.x clusters // without all clusters being restarted "v1.11.9-rancher1-3": { diff --git a/pkg/rke/k8s_version_info.go b/pkg/rke/k8s_version_info.go index 69181e1e6..76ea2468e 100644 --- a/pkg/rke/k8s_version_info.go +++ b/pkg/rke/k8s_version_info.go @@ -61,7 +61,7 @@ func loadRKEDefaultK8sVersions() map[string]string { return map[string]string{ "0.3": "v1.16.3-rancher1-1", // rke will use default if its version is absent - "default": "v1.27.12-rancher1-1", + "default": "v1.27.13-rancher1-1", } } diff --git a/pkg/rke/templates/coredns_v1.8.3-rancher3.go b/pkg/rke/templates/coredns_v1.8.3-rancher3.go new file mode 100644 index 000000000..da034b352 --- /dev/null +++ b/pkg/rke/templates/coredns_v1.8.3-rancher3.go @@ -0,0 +1,370 @@ +package templates + +/* +Rancher Changelog: +- coredns 
deployment: liveness and readiness probe configuration +- coredns deployment: drop the replica +- coredns deployment: replace the usage of `beta.kubernetes.io/os: linux` with `kubernetes.io/os: linux` +- coredns deployment: replace the seccomp annotation with seccompProfile +- coredns-autoscaler deployment: liveness and readiness probe configuration +- coredns-autoscaler deployment: replace the usage of `beta.kubernetes.io/os: linux` with `kubernetes.io/os: linux` +*/ + +const CoreDNSTemplateV183Rancher3 = ` +# Based on coredns/deployment/kubernetes/coredns.yaml.sed v1.8.3 +--- +{{- if eq .RBACConfig "rbac"}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system +{{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ "in-addr.arpa ip6.arpa" }}{{ end }} { + pods insecure + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + {{- if .UpstreamNameservers }} + forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}} + {{- else }} + forward . "/etc/resolv.conf" + {{- end }} + cache 30 + loop + reload + loadbalance + } # STUBDOMAINS - Rancher specific change +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coredns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/name: "CoreDNS" +spec: + # replicas is not specified in upstream template, default is 1. + # Will be tuned in real time if DNS horizontal auto-scaling is turned on. 
+ strategy: +{{if .UpdateStrategy}} +{{ toYaml .UpdateStrategy | indent 4}} +{{else}} + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 +{{end}} + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' + spec: + # Rancher specific change + priorityClassName: {{ .CoreDNSPriorityClassName | default "system-cluster-critical" }} +{{- if eq .RBACConfig "rbac"}} + serviceAccountName: coredns +{{- end }} +{{- if .Tolerations}} + tolerations: +{{ toYaml .Tolerations | indent 6}} +{{- else }} + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists +{{- end }} + nodeSelector: + kubernetes.io/os: linux + {{ range $k, $v := .NodeSelector }} + {{ $k }}: "{{ $v }}" + {{ end }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: ["kube-dns"] + topologyKey: kubernetes.io/hostname + containers: + - name: coredns + image: {{.CoreDNSImage}} + imagePullPolicy: IfNotPresent + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + readOnly: true + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + dnsPolicy: Default + volumes: + - name: config-volume + configMap: + name: coredns + items: + - key: Corefile + path: Corefile +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: {{.ClusterDNSServer}} + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coredns-autoscaler + namespace: kube-system + labels: + k8s-app: coredns-autoscaler +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + labels: + k8s-app: coredns-autoscaler + spec: +{{- if eq .RBACConfig "rbac"}} + serviceAccountName: coredns-autoscaler +{{- end }} +# Rancher specific change +{{- if .CoreDNSAutoscalerPriorityClassName }} + priorityClassName: {{ .CoreDNSAutoscalerPriorityClassName }} +{{- end }} + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists +{{- if 
.Tolerations}} + tolerations: +{{ toYaml .Tolerations | indent 6}} +{{- else }} + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists +{{- end }} + containers: + - name: autoscaler + image: {{.CoreDNSAutoScalerImage}} + resources: + requests: + cpu: "20m" + memory: "10Mi" + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. +{{if .LinearAutoscalerParams}} + - --default-params={"linear":{{.LinearAutoscalerParams}}} +{{else}} + - --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1,"preventSinglePointFailure":true}} +{{end}} + - --nodelabels=node-role.kubernetes.io/worker=true,kubernetes.io/os=linux + - --logtostderr=true + - --v=2 +{{- if eq .RBACConfig "rbac"}} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns-autoscaler + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:coredns-autoscaler +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list", "watch"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions","apps"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:coredns-autoscaler +subjects: + - kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system +roleRef: + kind: ClusterRole + name: system:coredns-autoscaler + apiGroup: rbac.authorization.k8s.io +{{- end }} +` diff --git a/pkg/rke/templates/templates.go b/pkg/rke/templates/templates.go index 4765d01b8..432f30b99 100644 --- a/pkg/rke/templates/templates.go +++ b/pkg/rke/templates/templates.go @@ -66,6 +66,7 @@ const ( coreDnsv117 = "coredns-v1.17" coreDnsv183 = "coredns-v1.8.3" coreDnsv183Rancher2 = "coredns-v1.8.3-rancher2" + coreDnsv183Rancher3 = "coredns-v1.8.3-rancher3" kubeDnsv18 = "kubedns-v1.8" kubeDnsv116 = "kubedns-v1.16" @@ -205,7 +206,8 @@ func LoadK8sVersionedTemplates() map[string]map[string]string { ">=1.8.0-rancher0 <1.15.0-rancher0": flannelv18, }, kdm.CoreDNS: { - ">=1.21.9-rancher1-2": coreDnsv183Rancher2, + ">=1.27.13-rancher1-1 <1.28.0-rancher1-1": coreDnsv183Rancher3, + ">=1.21.9-rancher1-2 <1.27.13-rancher1-1": coreDnsv183Rancher2, ">=1.21.0-rancher1-1 <1.21.9-rancher1-2": coreDnsv183, ">=1.20.15-rancher1-2 <1.21.0-rancher1-1": coreDnsv183Rancher2, ">=1.17.0-alpha <1.20.15-rancher1-2": coreDnsv117, @@ -376,6 +378,7 @@ func getTemplates() map[string]string { coreDnsv117: CoreDNSTemplateV117, coreDnsv183: CoreDNSTemplateV183, coreDnsv183Rancher2: CoreDNSTemplateV183Rancher2, + coreDnsv183Rancher3: CoreDNSTemplateV183Rancher3, kubeDnsv18: KubeDNSTemplate, kubeDnsv116: KubeDNSTemplateV116, diff --git a/pkg/validation/validation.go b/pkg/validation/validation.go index 22c8cb304..460f074b4 100644 
--- a/pkg/validation/validation.go +++ b/pkg/validation/validation.go @@ -17,6 +17,12 @@ import ( "sigs.k8s.io/yaml" ) +const ( + rancherChart = "https://charts.rancher.io" + oldRancherChart = "https://github.com/rancher/charts" + rke2Chart = "https://rke2-charts.rancher.io" +) + var ( releaseDataURL = "https://releases.rancher.com/kontainer-driver-metadata/%s/data.json" releaseRegSyncURL = "https://raw.githubusercontent.com/rancher/kontainer-driver-metadata/%s/regsync.yaml" @@ -267,8 +273,17 @@ func validateRKE2Charts(release map[string]interface{}) error { if err != nil { return err } + var isValidRepo bool + switch repo { + case "rancher-charts": + isValidRepo = strings.HasPrefix(chartURL, rancherChart) || strings.HasPrefix(chartURL, oldRancherChart) + case "rancher-rke2-charts": + isValidRepo = strings.HasPrefix(chartURL, rke2Chart) + default: + isValidRepo = strings.HasPrefix(chartURL, "https://"+repo) + } expectedChartTarball := fmt.Sprintf("%s-%s.tgz", chartName, chartVersion) - if !strings.Contains(chartURL, expectedChartTarball) { + if !strings.Contains(chartURL, expectedChartTarball) || !isValidRepo { return fmt.Errorf("unexpected chart URL for %s/%s:%s: %s", repo, chartName, chartVersion, chartURL) } } diff --git a/regsync.yaml b/regsync.yaml index 91f40735d..6b51ec112 100644 --- a/regsync.yaml +++ b/regsync.yaml @@ -29,6 +29,12 @@ sync: - v0.3.0-rancher6 - v0.3.0-rancher7 - v0.3.0-rancher8 + - source: docker.io/rancher/hardened-addon-resizer + target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/hardened-addon-resizer' + type: repository + tags: + allow: + - 1.8.20-build20240410 - source: docker.io/rancher/hardened-calico target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/hardened-calico' type: repository @@ -44,6 +50,7 @@ sync: - v3.26.3-build20231109 - v3.27.0-build20240206 - v3.27.2-build20240308 + - v3.27.3-build20240423 - source: docker.io/rancher/hardened-cluster-autoscaler target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/hardened-cluster-autoscaler' type: repository @@ -65,6 +72,7 @@ sync: - v1.2.0-build20230523 - v1.2.0-build20231009 - v1.4.0-build20240122 + - v1.4.1-build20240325 - source: docker.io/rancher/hardened-coredns target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/hardened-coredns' type: repository @@ -97,6 +105,7 @@ sync: - v3.5.4-k3s1-build20221011 - v3.5.7-k3s1-build20230406 - v3.5.9-k3s1-build20230802 + - v3.5.9-k3s1-build20240418 - source: docker.io/rancher/hardened-flannel target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/hardened-flannel' type: repository @@ -113,6 +122,7 @@ sync: - v0.23.0-build20231109 - v0.24.2-build20240122 - v0.24.3-build20240307 + - v0.25.1-build20240423 - source: docker.io/rancher/hardened-ib-sriov-cni target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/hardened-ib-sriov-cni' type: repository @@ -133,6 +143,7 @@ sync: - v0.6.3-build20230515 - v0.6.3-build20230607 - v0.6.3-build20231009 + - v0.7.1-build20240401 - source: docker.io/rancher/hardened-kubernetes target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/hardened-kubernetes' type: repository @@ -175,6 +186,7 @@ sync: - v1.27.10-rke2r1-build20240117 - v1.27.11-rke2r1-build20240214 - v1.27.12-rke2r1-build20240315 + - v1.27.13-rke2r1-build20240416 - v1.27.5-rke2r1-build20230824 - v1.27.7-rke2r2-build20231102 - v1.27.8-rke2r1-build20231115 @@ -265,6 +277,7 @@ sync: - v0.1.4 - v0.1.5 - v0.2.0 + - v0.2.1 - source: docker.io/rancher/harvester-csi-driver target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/harvester-csi-driver' type: repository @@ -319,6 +332,7 @@ sync: - v1.27.10-rancher1 - v1.27.11-rancher1 - 
v1.27.12-rancher1 + - v1.27.13-rancher1 - v1.27.6-rancher1 - v1.27.8-rancher2 - source: docker.io/rancher/k3s-upgrade @@ -363,6 +377,7 @@ sync: - v1.27.10-k3s2 - v1.27.11-k3s1 - v1.27.12-k3s1 + - v1.27.13-k3s1 - v1.27.5-k3s1 - v1.27.7-k3s2 - v1.27.8-k3s2 @@ -435,6 +450,7 @@ sync: - v3.26.3 - v3.27.0 - v3.27.2 + - v3.27.3 - source: docker.io/rancher/mirrored-calico-cni target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-calico-cni' type: repository @@ -449,12 +465,14 @@ sync: - v3.26.3 - v3.27.0 - v3.27.2 + - v3.27.3 - source: docker.io/rancher/mirrored-calico-csi target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-calico-csi' type: repository tags: allow: - v3.27.2 + - v3.27.3 - source: docker.io/rancher/mirrored-calico-ctl target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-calico-ctl' type: repository @@ -470,6 +488,7 @@ sync: - v3.26.3 - v3.27.0 - v3.27.2 + - v3.27.3 - source: docker.io/rancher/mirrored-calico-kube-controllers target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-calico-kube-controllers' type: repository @@ -485,6 +504,7 @@ sync: - v3.26.3 - v3.27.0 - v3.27.2 + - v3.27.3 - source: docker.io/rancher/mirrored-calico-node target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-calico-node' type: repository @@ -500,12 +520,14 @@ sync: - v3.26.3 - v3.27.0 - v3.27.2 + - v3.27.3 - source: docker.io/rancher/mirrored-calico-node-driver-registrar target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-calico-node-driver-registrar' type: repository tags: allow: - v3.27.2 + - v3.27.3 - source: docker.io/rancher/mirrored-calico-operator target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-calico-operator' type: repository @@ -519,6 +541,7 @@ sync: - v1.30.7 - v1.32.3 - v1.32.5 + - v1.32.7 - source: docker.io/rancher/mirrored-calico-pod2daemon-flexvol target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-calico-pod2daemon-flexvol' type: repository @@ -534,6 +557,7 @@ sync: - v3.26.3 - v3.27.0 - v3.27.2 + - v3.27.3 - source: docker.io/rancher/mirrored-calico-typha target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-calico-typha' type: repository @@ -547,11 +571,13 @@ sync: - v3.26.3 - v3.27.0 - v3.27.2 + - v3.27.3 - source: docker.io/rancher/mirrored-cilium-certgen target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-cilium-certgen' type: repository tags: allow: + - v0.1.11 - v0.1.8 - v0.1.9 - source: docker.io/rancher/mirrored-cilium-cilium @@ -570,6 +596,7 @@ sync: - v1.14.2 - v1.14.4 - v1.15.1 + - v1.15.4 - source: docker.io/rancher/mirrored-cilium-cilium-envoy target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-cilium-cilium-envoy' type: repository @@ -579,6 +606,7 @@ sync: - v1.25.9-f039e2bd380b7eef2f2feea5750676bb36133699 - v1.26.6-ff0d5d3f77d610040e93c7c7a430d61a0c0b90c1 - v1.27.3-713b673cccf1af661efd75ca20532336517ddcb9 + - v1.27.4-21905253931655328edaacf3cd16aeda73bbea2f - source: docker.io/rancher/mirrored-cilium-cilium-etcd-operator target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-cilium-cilium-etcd-operator' type: repository @@ -598,6 +626,7 @@ sync: - v1.14.2 - v1.14.4 - v1.15.1 + - v1.15.4 - source: docker.io/rancher/mirrored-cilium-hubble-relay target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-cilium-hubble-relay' type: repository @@ -611,6 +640,7 @@ sync: - v1.14.2 - v1.14.4 - v1.15.1 + - v1.15.4 - source: docker.io/rancher/mirrored-cilium-hubble-ui target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-cilium-hubble-ui' type: repository @@ -657,6 +687,7 @@ sync: - v1.14.2 - v1.14.4 - v1.15.1 + - v1.15.4 - source: 
docker.io/rancher/mirrored-cilium-operator-azure target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-cilium-operator-azure' type: repository @@ -673,6 +704,7 @@ sync: - v1.14.2 - v1.14.4 - v1.15.1 + - v1.15.4 - source: docker.io/rancher/mirrored-cilium-operator-generic target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-cilium-operator-generic' type: repository @@ -689,6 +721,7 @@ sync: - v1.14.2 - v1.14.4 - v1.15.1 + - v1.15.4 - source: docker.io/rancher/mirrored-cilium-startup-script target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-cilium-startup-script' type: repository @@ -848,6 +881,7 @@ sync: tags: allow: - 2.10.5 + - 2.10.7 - 2.6.2 - 2.9.1 - 2.9.10 @@ -935,6 +969,12 @@ sync: - v1.4.0 - v1.7.0 - v1.8.0 + - source: docker.io/rancher/mirrored-sig-storage-csi-snapshotter + target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-sig-storage-csi-snapshotter' + type: repository + tags: + allow: + - v6.2.1 - source: docker.io/rancher/mirrored-sig-storage-livenessprobe target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/mirrored-sig-storage-livenessprobe' type: repository @@ -975,6 +1015,7 @@ sync: - nginx-1.8.1-rancher1 - nginx-1.9.3-hardened1 - nginx-1.9.4-rancher1 + - nginx-1.9.6-hardened1 - source: docker.io/rancher/pause target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/pause' type: repository @@ -1005,6 +1046,7 @@ sync: - v1.26.3-build20230406 - v1.26.3-build20230608 - v1.28.2-build20231016 + - v1.29.3-build20240412 - source: docker.io/rancher/rke2-runtime target: '{{ env "REGISTRY_ENDPOINT" }}/rancher/rke2-runtime' type: repository @@ -1084,6 +1126,8 @@ sync: - v1.27.11-rke2r1-windows-amd64 - v1.27.12-rke2r1 - v1.27.12-rke2r1-windows-amd64 + - v1.27.13-rke2r1 + - v1.27.13-rke2r1-windows-amd64 - v1.27.5-rke2r1 - v1.27.5-rke2r1-windows-amd64 - v1.27.7-rke2r2 @@ -1132,6 +1176,7 @@ sync: - v1.27.10-rke2r1 - v1.27.11-rke2r1 - v1.27.12-rke2r1 + - v1.27.13-rke2r1 - v1.27.5-rke2r1 - v1.27.7-rke2r2 - v1.27.8-rke2r1 @@ -1177,6 +1222,7 @@ sync: - v1.27.10-k3s2 - v1.27.11-k3s1 - v1.27.12-k3s1 + - v1.27.13-k3s1 - v1.27.5-k3s1 - v1.27.7-k3s2 - v1.27.8-k3s2 @@ -1222,6 +1268,7 @@ sync: - v1.27.10-rke2r1 - v1.27.11-rke2r1 - v1.27.12-rke2r1 + - v1.27.13-rke2r1 - v1.27.5-rke2r1 - v1.27.7-rke2r2 - v1.27.8-rke2r1
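
Note on the CoreDNS template routing: the new entry in pkg/rke/templates/templates.go keys template selection off semver ranges, and the pkg/rke/k8s_defaults.go fix makes the accompanying panic report the raw range string (toTestRange) rather than the parsed Range function. A minimal sketch, assuming the same github.com/blang/semver behaviour the existing validateTemplateMatch code relies on, of how a version such as v1.27.13-rancher1-1 resolves to the new coredns-v1.8.3-rancher3 key under the updated ranges; pickTemplate is an illustrative helper, not a function in this repository.

    package main

    import (
    	"fmt"

    	"github.com/blang/semver"
    )

    // pickTemplate mirrors how the versioned template maps are consulted:
    // the Kubernetes version is matched against each semver range and the
    // template key of the single expected match is returned.
    func pickTemplate(k8sVersion string, ranges map[string]string) (string, error) {
    	// strip the leading "v"; a sketch only, no further input validation
    	toMatch, err := semver.Make(k8sVersion[1:])
    	if err != nil {
    		return "", fmt.Errorf("%s not sem-ver: %v", k8sVersion, err)
    	}
    	for toTestRange, key := range ranges {
    		testRange, err := semver.ParseRange(toTestRange)
    		if err != nil {
    			// report the raw range string, as the k8s_defaults.go fix now does
    			return "", fmt.Errorf("range %q not sem-ver: %v", toTestRange, err)
    		}
    		if testRange(toMatch) {
    			return key, nil
    		}
    	}
    	return "", fmt.Errorf("no template range matches %s", k8sVersion)
    }

    func main() {
    	coreDNSRanges := map[string]string{
    		">=1.27.13-rancher1-1 <1.28.0-rancher1-1": "coredns-v1.8.3-rancher3",
    		">=1.21.9-rancher1-2 <1.27.13-rancher1-1": "coredns-v1.8.3-rancher2",
    	}
    	key, err := pickTemplate("v1.27.13-rancher1-1", coreDNSRanges)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(key) // coredns-v1.8.3-rancher3
    }

Because 1.27.13-rancher1-1 sits exactly at the lower bound of the new range and just outside the tightened upper bound of the rancher2 range, only the rancher3 template can match it.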
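
Note on the coredns-autoscaler defaults carried into coredns_v1.8.3-rancher3.go ({"coresPerReplica":128,"nodesPerReplica":4,"min":1,"preventSinglePointFailure":true}): in cluster-proportional-autoscaler's linear mode, as documented upstream, the replica count tracks whichever of cluster cores or schedulable nodes dominates, i.e. replicas = max(ceil(cores/coresPerReplica), ceil(nodes/nodesPerReplica)), clamped to min, with preventSinglePointFailure forcing at least two replicas once there is more than one node. A rough sketch of that arithmetic, under the assumption that the upstream behaviour is as just described; linearReplicas is illustrative and not part of this repository.

    package main

    import (
    	"fmt"
    	"math"
    )

    // linearReplicas approximates the "linear" mode used by coredns-autoscaler:
    // replicas grow with whichever of cluster cores or nodes dominates.
    func linearReplicas(cores, nodes int, coresPerReplica, nodesPerReplica float64, min int, preventSinglePointFailure bool) int {
    	byCores := math.Ceil(float64(cores) / coresPerReplica)
    	byNodes := math.Ceil(float64(nodes) / nodesPerReplica)
    	replicas := int(math.Max(byCores, byNodes))
    	if preventSinglePointFailure && nodes > 1 && replicas < 2 {
    		replicas = 2
    	}
    	if replicas < min {
    		replicas = min
    	}
    	return replicas
    }

    func main() {
    	// Defaults from the template: coresPerReplica=128, nodesPerReplica=4, min=1.
    	fmt.Println(linearReplicas(16, 3, 128, 4, 1, true))   // 2: three nodes trigger the HA floor
    	fmt.Println(linearReplicas(512, 20, 128, 4, 1, true)) // 5: ceil(20/4) outweighs ceil(512/128)
    }

With these defaults, a small three-node cluster still runs two CoreDNS replicas, and node count only begins to dominate once nodes/4 exceeds cores/128.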
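
Note on the pkg/validation/validation.go change: each chart's declared repo is now paired with an expected URL prefix in addition to the existing tarball-name check, which is the class of mismatch behind the rancher-vsphere-csi corrections in channels-rke2.yaml (rancher-charts to rancher-rke2-charts). A standalone sketch of the combined check follows; the sample URL assumes the usual assets/<chart>/<chart>-<version>.tgz layout of rke2-charts.rancher.io, which is an assumption about the chart index rather than something asserted in this diff.

    package main

    import (
    	"fmt"
    	"strings"
    )

    const (
    	rancherChart    = "https://charts.rancher.io"
    	oldRancherChart = "https://github.com/rancher/charts"
    	rke2Chart       = "https://rke2-charts.rancher.io"
    )

    // validChartURL mirrors the tightened validation: the URL must belong to the
    // repository the release metadata claims, and must reference the expected
    // <chart>-<version>.tgz tarball.
    func validChartURL(repo, chartName, chartVersion, chartURL string) bool {
    	var isValidRepo bool
    	switch repo {
    	case "rancher-charts":
    		isValidRepo = strings.HasPrefix(chartURL, rancherChart) || strings.HasPrefix(chartURL, oldRancherChart)
    	case "rancher-rke2-charts":
    		isValidRepo = strings.HasPrefix(chartURL, rke2Chart)
    	default:
    		isValidRepo = strings.HasPrefix(chartURL, "https://"+repo)
    	}
    	expectedChartTarball := fmt.Sprintf("%s-%s.tgz", chartName, chartVersion)
    	return isValidRepo && strings.Contains(chartURL, expectedChartTarball)
    }

    func main() {
    	// Hypothetical URL, assuming the assets/<chart>/ layout.
    	url := "https://rke2-charts.rancher.io/assets/rke2-coredns/rke2-coredns-1.29.002.tgz"
    	fmt.Println(validChartURL("rancher-rke2-charts", "rke2-coredns", "1.29.002", url)) // true
    	// The same URL fails if the metadata claims the rancher-charts repo,
    	// which is the mismatch the channels-rke2.yaml fixes remove.
    	fmt.Println(validChartURL("rancher-charts", "rke2-coredns", "1.29.002", url)) // false
    }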