# canal-no-flannel.yaml
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: canal-config
  namespace: kube-system
data:
  # Configure this with the location of your etcd cluster.
  etcd-endpoints: "http://127.0.0.1:2379"
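  # For example, a multi-member etcd cluster can be given as a single
  # comma-separated list (the addresses here are hypothetical):
  # etcd-endpoints: "http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379"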
  # The interface used by canal for host <-> host communication.
  # If left blank, then the interface is chosen using the node's
  # default route.
  canal-iface: ""
  # Whether or not to masquerade traffic to destinations not within
  # the pod network.
  masquerade: "true"
  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni-network-config: |-
    {
      "name": "canal",
      "type": "flannel",
      "delegate": {
        "type": "calico",
        "etcd_endpoints": "__ETCD_ENDPOINTS__",
        "log_level": "info",
        "policy": {
          "type": "k8s",
          "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
          "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
        },
        "kubernetes": {
          "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
        }
      }
    }
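# The __ETCD_ENDPOINTS__, __KUBERNETES_SERVICE_HOST__/__KUBERNETES_SERVICE_PORT__,
# __SERVICEACCOUNT_TOKEN__ and __KUBECONFIG_FILENAME__ placeholders above are the
# "special values" that are populated automatically: the install-cni.sh script in
# the install-calico-cni container below substitutes them before writing the
# config into /etc/cni/net.d on each host. As a rough, illustrative sketch (the
# substituted values are examples, not taken from a real cluster), the rendered
# 10-calico.conf might look like:
#
#   {
#     "name": "canal",
#     "type": "flannel",
#     "delegate": {
#       "type": "calico",
#       "etcd_endpoints": "http://127.0.0.1:2379",
#       "log_level": "info",
#       "policy": {
#         "type": "k8s",
#         "k8s_api_root": "https://10.96.0.1:443",
#         "k8s_auth_token": "<service account token>"
#       },
#       "kubernetes": {
#         "kubeconfig": "/etc/cni/net.d/<generated kubeconfig file>"
#       }
#     }
#   }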
---

# This manifest installs the per-node agents, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: canal-node
  namespace: kube-system
  labels:
    k8s-app: canal-node
spec:
  selector:
    matchLabels:
      k8s-app: canal-node
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
      labels:
        k8s-app: canal-node
    spec:
      hostNetwork: true
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and local routes on each
        # host.
        - name: calico-node
          image: quay.io/calico/node:v0.22.0
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: etcd-endpoints
            # Disable Calico BGP. Calico is simply enforcing policy.
            - name: CALICO_NETWORKING
              value: "false"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-calico-cni
          image: calico/cni:v1.4.2
          imagePullPolicy: Always
          command: ["/install-cni.sh"]
          env:
            # The name of the CNI network config file to install.
            - name: CNI_CONF_NAME
              value: "10-calico.conf"
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: etcd-endpoints
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: cni-network-config
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
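# Once the manifest has been applied, a quick sanity check for this DaemonSet
# (assuming kubectl is already pointed at the target cluster) could be:
#
#   kubectl --namespace=kube-system get daemonset canal-node
#   kubectl --namespace=kube-system get pods -l k8s-app=canal-node -o wide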
---

# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      containers:
        - name: calico-policy-controller
          image: calico/kube-policy-controller:v0.3.0
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: etcd-endpoints
            # The location of the Kubernetes API. Use the default Kubernetes
            # service for API access.
            - name: K8S_API
              value: "https://kubernetes.default:443"
            # Since we're running in the host namespace and might not have KubeDNS
            # access, configure the container's /etc/hosts to resolve
            # kubernetes.default to the correct service clusterIP.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"