HCloud Cilium Config Diff

A diff of the Helm-rendered Cilium manifests for a Hetzner Cloud (HCloud) cluster: the old render is Cilium v1.7.1 (680 lines, 553 lines removed), the new render is Cilium v1.9.0 (690 lines, 363 lines added). The sections below use unified-diff notation: lines prefixed with "-" appear only in the v1.7.1 render, lines prefixed with "+" only in the v1.9.0 render, and unchanged lines are shown once.
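Renders like these typically come from `helm template`. A rough sketch of how such a comparison can be reproduced (the chart values below are illustrative assumptions, not taken from this diff):

  $ helm repo add cilium https://helm.cilium.io/
  $ helm template cilium cilium/cilium --version 1.9.0 --namespace kube-system \
      --set ipam.mode=cluster-pool --set tunnel=disabled > cilium-v1.9.0.yaml
  $ # render the old chart version the same way, then:
  $ diff -u cilium-v1.7.1.yaml cilium-v1.9.0.yaml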
 ---
-# Source: cilium/charts/agent/templates/serviceaccount.yaml
+# Source: cilium/templates/cilium-agent-serviceaccount.yaml
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: cilium
   namespace: kube-system
 ---
-# Source: cilium/charts/operator/templates/serviceaccount.yaml
+# Source: cilium/templates/cilium-operator-serviceaccount.yaml
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: cilium-operator
   namespace: kube-system
 ---
-# Source: cilium/charts/config/templates/configmap.yaml
+# Source: cilium/templates/cilium-configmap.yaml
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: cilium-config
   namespace: kube-system
 data:

   # Identity allocation mode selects how identities are shared between cilium
   # nodes by setting how they are stored. The options are "crd" or "kvstore".
   # - "crd" stores identities in kubernetes as CRDs (custom resource definition).
   #   These can be queried with:
   #     kubectl get ciliumid
   # - "kvstore" stores identities in a kvstore, etcd or consul, that is
   #   configured below. Cilium versions before 1.6 supported only the kvstore
   #   backend. Upgrades from these older cilium versions should continue using
   #   the kvstore by commenting out the identity-allocation-mode below, or
   #   setting it to "kvstore".
   identity-allocation-mode: crd
+  cilium-endpoint-gc-interval: "5m0s"

   # If you want to run cilium in debug mode change this value to true
   debug: "false"
-  # If you want metrics enabled in all of your Cilium agents, set the port for
-  # which the Cilium agents will have their metrics exposed.
-  # This option deprecates the "prometheus-serve-addr" in the
-  # "cilium-metrics-config" ConfigMap
-  # NOTE that this will open the port on ALL nodes where Cilium pods are
-  # scheduled.
-  prometheus-serve-addr: ":9100"

   # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
   # address.
   enable-ipv4: "true"

   # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
   # address.
   enable-ipv6: "false"

+  # Users who wish to specify their own custom CNI configuration file must set
+  # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
+  custom-cni-conf: "false"
+  enable-bpf-clock-probe: "true"
   # If you want cilium monitor to aggregate tracing for packets, set this level
   # to "low", "medium", or "maximum". The higher the level, the less packets
   # that will be seen in monitor output.
   monitor-aggregation: medium

   # The monitor aggregation interval governs the typical time between monitor
   # notification events for each allowed connection.
   #
   # Only effective when monitor aggregation is set to "medium" or higher.
   monitor-aggregation-interval: 5s

   # The monitor aggregation flags determine which TCP flags which, upon the
   # first observation, cause monitor notifications to be generated.
   #
   # Only effective when monitor aggregation is set to "medium" or higher.
   monitor-aggregation-flags: all

-  # ct-global-max-entries-* specifies the maximum number of connections
-  # supported across all endpoints, split by protocol: tcp or other. One pair
-  # of maps uses these values for IPv4 connections, and another pair of maps
-  # use these values for IPv6 connections.
-  #
-  # If these values are modified, then during the next Cilium startup the
-  # tracking of ongoing connections may be disrupted. This may lead to brief
-  # policy drops or a change in loadbalancing decisions for a connection.
-  #
-  # For users upgrading from Cilium 1.2 or earlier, to minimize disruption
-  # during the upgrade process, comment out these options.
-  bpf-ct-global-tcp-max: "524288"
-  bpf-ct-global-any-max: "262144"
+  # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
+  # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
+  bpf-map-dynamic-size-ratio: "0.0025"
+  # bpf-policy-map-max specifies the maximum number of entries in endpoint
+  # policy map (per endpoint)
+  bpf-policy-map-max: "16384"
+  # bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
+  # backend and affinity maps.
+  bpf-lb-map-max: "65536"

   # Pre-allocation of map entries allows per-packet latency to be reduced, at
   # the expense of up-front memory allocation for the entries in the maps. The
   # default value below will minimize memory usage in the default installation;
   # users who are sensitive to latency may consider setting this to "true".
   #
   # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
   # this option and behave as though it is set to "true".
   #
   # If this value is modified, then during the next Cilium startup the restore
   # of existing endpoints and tracking of ongoing connections may be disrupted.
-  # This may lead to policy drops or a change in loadbalancing decisions for a
-  # connection for some time. Endpoints may need to be recreated to restore
-  # connectivity.
+  # As a result, reply packets may be dropped and the load-balancing decisions
+  # for established connections may change.
   #
   # If this option is set to "false" during an upgrade from 1.3 or earlier to
   # 1.4 or later, then it may cause one-time disruptions during the upgrade.
   preallocate-bpf-maps: "false"

   # Regular expression matching compatible Istio sidecar istio-proxy
   # container image names
   sidecar-istio-proxy-image: "cilium/istio_proxy"

   # Encapsulation mode for communication between nodes
   # Possible values:
   #   - disabled
   #   - vxlan (default)
   #   - geneve
   tunnel: disabled

   # Name of the cluster. Only relevant when building a mesh of clusters.
   cluster-name: default

-  # DNS Polling periodically issues a DNS lookup for each `matchName` from
-  # cilium-agent. The result is used to regenerate endpoint policy.
-  # DNS lookups are repeated with an interval of 5 seconds, and are made for
-  # A(IPv4) and AAAA(IPv6) addresses. Should a lookup fail, the most recent IP
-  # data is used instead. An IP change will trigger a regeneration of the Cilium
-  # policy for each endpoint and increment the per cilium-agent policy
-  # repository revision.
-  #
-  # This option is disabled by default starting from version 1.4.x in favor
-  # of a more powerful DNS proxy-based implementation, see [0] for details.
-  # Enable this option if you want to use FQDN policies but do not want to use
-  # the DNS proxy.
-  #
-  # To ease upgrade, users may opt to set this option to "true".
-  # Otherwise please refer to the Upgrade Guide [1] which explains how to
-  # prepare policy rules for upgrade.
-  #
-  # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
-  # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
-  tofqdns-enable-poller: "false"
+  # Enables L7 proxy for L7 policy enforcement and visibility
+  enable-l7-proxy: "true"

   # wait-bpf-mount makes init container wait until bpf filesystem is mounted
   wait-bpf-mount: "false"

   masquerade: "true"
-  enable-ipsec: "false"
-  ipsec-key-file: /etc/ipsec/keys
-  encrypt-interface: ens10
-  encrypt-node: "false"
+  enable-bpf-masquerade: "true"
   enable-xt-socket-fallback: "true"
   install-iptables-rules: "true"
   auto-direct-node-routes: "false"
+  enable-bandwidth-manager: "false"
+  enable-local-redirect-policy: "false"
   kube-proxy-replacement: "probe"
-  enable-host-reachable-services: "false"
-  enable-external-ips: "false"
-  enable-node-port: "false"
+  kube-proxy-replacement-healthz-bind-address: ""
+  enable-health-check-nodeport: "true"
+  node-port-bind-protection: "true"
+  enable-auto-protect-node-port-range: "true"
+  enable-session-affinity: "true"
+  enable-endpoint-health-checking: "true"
+  enable-health-checking: "true"
   enable-well-known-identities: "false"
   enable-remote-node-identity: "true"
+  operator-api-serve-addr: "127.0.0.1:9234"
+  # Enable Hubble gRPC service.
+  enable-hubble: "true"
+  # UNIX domain socket for Hubble server to listen to.
+  hubble-socket-path: "/var/run/cilium/hubble.sock"
+  ipam: "cluster-pool"
+  cluster-pool-ipv4-cidr: 10.224.0.0/16
+  cluster-pool-ipv4-mask-size: "24"
+  disable-cnp-status-updates: "true"

-  blacklist-conflicting-routes: "false"
+  # Inserted Configuration
+  native-routing-cidr: 10.0.0.0/8
+  enable-endpoint-routes: "true"
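The agent reads these keys from the mounted ConfigMap via --config-dir. A quick sketch for checking what a running agent actually picked up (standard kubectl and cilium CLI commands; `ds/cilium` assumes the DaemonSet name used in this render):

  $ kubectl -n kube-system get configmap cilium-config -o yaml
  $ kubectl -n kube-system exec ds/cilium -- cilium status --verbose

Among other things, `cilium status` reports what the kube-proxy-replacement "probe" mode actually enabled on the node's kernel.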
 ---
-# Source: cilium/charts/agent/templates/clusterrole.yaml
+# Source: cilium/templates/cilium-agent-clusterrole.yaml
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: cilium
 rules:
 - apiGroups:
   - networking.k8s.io
   resources:
   - networkpolicies
   verbs:
   - get
   - list
   - watch
 - apiGroups:
   - discovery.k8s.io
   resources:
   - endpointslices
   verbs:
   - get
   - list
   - watch
 - apiGroups:
   - ""
   resources:
   - namespaces
   - services
   - nodes
   - endpoints
   verbs:
   - get
   - list
   - watch
 - apiGroups:
   - ""
   resources:
   - pods
-  - nodes
+  - pods/finalizers
   verbs:
   - get
   - list
   - watch
   - update
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+  - update
 - apiGroups:
   - ""
   resources:
   - nodes
   - nodes/status
   verbs:
   - patch
 - apiGroups:
   - apiextensions.k8s.io
   resources:
   - customresourcedefinitions
   verbs:
+  # Deprecated for removal in v1.10
   - create
-  - get
   - list
   - watch
   - update
+
+  # This is used when validating policies in preflight. This will need to stay
+  # until we figure out how to avoid "get" inside the preflight, and then
+  # should be removed ideally.
+  - get
 - apiGroups:
   - cilium.io
   resources:
   - ciliumnetworkpolicies
   - ciliumnetworkpolicies/status
+  - ciliumnetworkpolicies/finalizers
   - ciliumclusterwidenetworkpolicies
   - ciliumclusterwidenetworkpolicies/status
+  - ciliumclusterwidenetworkpolicies/finalizers
   - ciliumendpoints
   - ciliumendpoints/status
+  - ciliumendpoints/finalizers
   - ciliumnodes
   - ciliumnodes/status
+  - ciliumnodes/finalizers
   - ciliumidentities
-  - ciliumidentities/status
+  - ciliumidentities/finalizers
+  - ciliumlocalredirectpolicies
+  - ciliumlocalredirectpolicies/status
+  - ciliumlocalredirectpolicies/finalizers
   verbs:
-  - "*"
+  - '*'
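The added verbs and finalizer subresources can be spot-checked against a live cluster with `kubectl auth can-i`, impersonating the agent's ServiceAccount (standard kubectl; nothing here is specific to this render):

  $ kubectl auth can-i delete pods --as=system:serviceaccount:kube-system:cilium
  $ kubectl auth can-i update ciliumnodes/finalizers --as=system:serviceaccount:kube-system:cilium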
 ---
-# Source: cilium/charts/operator/templates/clusterrole.yaml
+# Source: cilium/templates/cilium-operator-clusterrole.yaml
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: cilium-operator
 rules:
 - apiGroups:
   - ""
   resources:
   # to automatically delete [core|kube]dns pods so that are starting to being
   # managed by Cilium
   - pods
   verbs:
   - get
   - list
   - watch
   - delete
 - apiGroups:
   - discovery.k8s.io
   resources:
   - endpointslices
   verbs:
   - get
   - list
   - watch
 - apiGroups:
   - ""
   resources:
-  # to automatically read from k8s and import the node's pod CIDR to cilium's
-  # etcd so all nodes know how to reach another pod running in in a different
-  # node.
-  - nodes
   # to perform the translation of a CNP that contains `ToGroup` to its endpoints
   - services
   - endpoints
   # to check apiserver connectivity
   - namespaces
   verbs:
   - get
   - list
   - watch
 - apiGroups:
   - cilium.io
   resources:
   - ciliumnetworkpolicies
   - ciliumnetworkpolicies/status
+  - ciliumnetworkpolicies/finalizers
   - ciliumclusterwidenetworkpolicies
   - ciliumclusterwidenetworkpolicies/status
+  - ciliumclusterwidenetworkpolicies/finalizers
   - ciliumendpoints
   - ciliumendpoints/status
+  - ciliumendpoints/finalizers
   - ciliumnodes
   - ciliumnodes/status
+  - ciliumnodes/finalizers
   - ciliumidentities
   - ciliumidentities/status
+  - ciliumidentities/finalizers
+  - ciliumlocalredirectpolicies
+  - ciliumlocalredirectpolicies/status
+  - ciliumlocalredirectpolicies/finalizers
   verbs:
-  - "*"
+  - '*'
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - create
+  - get
+  - list
+  - update
+  - watch
+# For cilium-operator running in HA mode.
+#
+# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
+# between mulitple running instances.
+# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
+# common and fewer objects in the cluster watch "all Leases".
+# The support for leases was introduced in coordination.k8s.io/v1 during Kubernetes 1.14 release.
+# In Cilium we currently don't support HA mode for K8s version < 1.14. This condition make sure
+# that we only authorize access to leases resources in supported K8s versions.
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - create
+  - get
+  - update
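Once the v1.9 operator runs in HA mode, the Lease-based leader election can be observed directly. The commands are standard; the specific lease name below is an assumption about Cilium's operator lock, so list first and adjust:

  $ kubectl -n kube-system get leases
  $ kubectl -n kube-system describe lease cilium-operator-resource-lock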
 ---
-# Source: cilium/charts/agent/templates/clusterrolebinding.yaml
+# Source: cilium/templates/cilium-agent-clusterrolebinding.yaml
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: cilium
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
   name: cilium
 subjects:
 - kind: ServiceAccount
   name: cilium
   namespace: kube-system
 ---
-# Source: cilium/charts/operator/templates/clusterrolebinding.yaml
+# Source: cilium/templates/cilium-operator-clusterrolebinding.yaml
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: cilium-operator
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
   name: cilium-operator
 subjects:
 - kind: ServiceAccount
   name: cilium-operator
   namespace: kube-system
 ---
-# Source: cilium/charts/agent/templates/daemonset.yaml
+# Source: cilium/templates/cilium-agent-daemonset.yaml
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
   labels:
     k8s-app: cilium
   name: cilium
   namespace: kube-system
 spec:
   selector:
     matchLabels:
       k8s-app: cilium
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 2
+    type: RollingUpdate
   template:
     metadata:
       annotations:
-        prometheus.io/port: "9100"
-        prometheus.io/scrape: "true"
         # This annotation plus the CriticalAddonsOnly toleration makes
         # cilium to be a critical pod in the cluster, which ensures cilium
         # gets priority scheduling.
         # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
         scheduler.alpha.kubernetes.io/critical-pod: ""
       labels:
         k8s-app: cilium
     spec:
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: k8s-app
+                operator: In
+                values:
+                - cilium
+            topologyKey: kubernetes.io/hostname
       containers:
       - args:
         - --config-dir=/tmp/cilium/config-map
         command:
         - cilium-agent
         livenessProbe:
-          exec:
-            command:
-            - cilium
-            - status
-            - --brief
+          httpGet:
+            host: '127.0.0.1'
+            path: /healthz
+            port: 9876
+            scheme: HTTP
+            httpHeaders:
+            - name: "brief"
+              value: "true"
           failureThreshold: 10
           # The initial delay for the liveness probe is intentionally large to
           # avoid an endless kill & restart cycle if in the event that the initial
           # bootstrapping takes longer than expected.
           initialDelaySeconds: 120
           periodSeconds: 30
           successThreshold: 1
           timeoutSeconds: 5
         readinessProbe:
-          exec:
-            command:
-            - cilium
-            - status
-            - --brief
+          httpGet:
+            host: '127.0.0.1'
+            path: /healthz
+            port: 9876
+            scheme: HTTP
+            httpHeaders:
+            - name: "brief"
+              value: "true"
           failureThreshold: 3
           initialDelaySeconds: 5
           periodSeconds: 30
           successThreshold: 1
           timeoutSeconds: 5
         env:
         - name: K8S_NODE_NAME
           valueFrom:
             fieldRef:
               apiVersion: v1
               fieldPath: spec.nodeName
         - name: CILIUM_K8S_NAMESPACE
           valueFrom:
             fieldRef:
               apiVersion: v1
               fieldPath: metadata.namespace
         - name: CILIUM_FLANNEL_MASTER_DEVICE
           valueFrom:
             configMapKeyRef:
               key: flannel-master-device
               name: cilium-config
               optional: true
         - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT
           valueFrom:
             configMapKeyRef:
               key: flannel-uninstall-on-exit
               name: cilium-config
               optional: true
         - name: CILIUM_CLUSTERMESH_CONFIG
           value: /var/lib/cilium/clustermesh/
         - name: CILIUM_CNI_CHAINING_MODE
           valueFrom:
             configMapKeyRef:
               key: cni-chaining-mode
               name: cilium-config
               optional: true
         - name: CILIUM_CUSTOM_CNI_CONF
           valueFrom:
             configMapKeyRef:
               key: custom-cni-conf
               name: cilium-config
               optional: true
-        image: "docker.io/cilium/cilium:v1.7.1"
+        image: quay.io/cilium/cilium:v1.9.0
         imagePullPolicy: IfNotPresent
         lifecycle:
           postStart:
             exec:
               command:
               - "/cni-install.sh"
               - "--enable-debug=false"
           preStop:
             exec:
               command:
               - /cni-uninstall.sh
         name: cilium-agent
-        ports:
-        - containerPort: 9100
-          hostPort: 9100
-          name: prometheus
-          protocol: TCP
         securityContext:
           capabilities:
             add:
             - NET_ADMIN
             - SYS_MODULE
           privileged: true
         volumeMounts:
         - mountPath: /sys/fs/bpf
           name: bpf-maps
         - mountPath: /var/run/cilium
           name: cilium-run
         - mountPath: /host/opt/cni/bin
           name: cni-path
         - mountPath: /host/etc/cni/net.d
           name: etc-cni-netd
         - mountPath: /var/lib/cilium/clustermesh
           name: clustermesh-secrets
           readOnly: true
         - mountPath: /tmp/cilium/config-map
           name: cilium-config-path
           readOnly: true
         # Needed to be able to load kernel modules
         - mountPath: /lib/modules
           name: lib-modules
           readOnly: true
         - mountPath: /run/xtables.lock
           name: xtables-lock
       hostNetwork: true
       initContainers:
       - command:
         - /init-container.sh
         env:
         - name: CILIUM_ALL_STATE
           valueFrom:
             configMapKeyRef:
               key: clean-cilium-state
               name: cilium-config
               optional: true
         - name: CILIUM_BPF_STATE
           valueFrom:
             configMapKeyRef:
               key: clean-cilium-bpf-state
               name: cilium-config
               optional: true
         - name: CILIUM_WAIT_BPF_MOUNT
           valueFrom:
             configMapKeyRef:
               key: wait-bpf-mount
               name: cilium-config
               optional: true
-        image: "docker.io/cilium/cilium:v1.7.1"
+        image: quay.io/cilium/cilium:v1.9.0
         imagePullPolicy: IfNotPresent
         name: clean-cilium-state
         securityContext:
           capabilities:
             add:
             - NET_ADMIN
           privileged: true
         volumeMounts:
         - mountPath: /sys/fs/bpf
           name: bpf-maps
           mountPropagation: HostToContainer
         - mountPath: /var/run/cilium
           name: cilium-run
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
       restartPolicy: Always
       priorityClassName: system-node-critical
       serviceAccount: cilium
       serviceAccountName: cilium
       terminationGracePeriodSeconds: 1
       tolerations:
       - operator: Exists
       volumes:
       # To keep state between restarts / upgrades
       - hostPath:
           path: /var/run/cilium
           type: DirectoryOrCreate
         name: cilium-run
       # To keep state between restarts / upgrades for bpf maps
       - hostPath:
           path: /sys/fs/bpf
           type: DirectoryOrCreate
         name: bpf-maps
       # To install cilium cni plugin in the host
       - hostPath:
           path: /opt/cni/bin
           type: DirectoryOrCreate
         name: cni-path
       # To install cilium cni configuration in the host
       - hostPath:
           path: /etc/cni/net.d
           type: DirectoryOrCreate
         name: etc-cni-netd
       # To be able to load kernel modules
       - hostPath:
           path: /lib/modules
         name: lib-modules
       # To access iptables concurrently with other processes (e.g. kube-proxy)
       - hostPath:
           path: /run/xtables.lock
           type: FileOrCreate
         name: xtables-lock
       # To read the clustermesh configuration
       - name: clustermesh-secrets
         secret:
           defaultMode: 420
           optional: true
           secretName: cilium-clustermesh
       # To read the configuration from the config map
       - configMap:
           name: cilium-config
         name: cilium-config-path
-  updateStrategy:
-    rollingUpdate:
-      maxUnavailable: 2
-    type: RollingUpdate
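The agent's probes move from exec (`cilium status --brief`) to an httpGet against 127.0.0.1:9876, which works because the pod runs with hostNetwork. After applying, the rollout can be watched with standard kubectl; with maxUnavailable: 2, up to two nodes at a time run without a ready agent while pods are replaced:

  $ kubectl -n kube-system rollout status daemonset/cilium
  $ kubectl -n kube-system get pods -l k8s-app=cilium -o wide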
 ---
-# Source: cilium/charts/operator/templates/deployment.yaml
+# Source: cilium/templates/cilium-operator-deployment.yaml
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   labels:
     io.cilium/app: operator
     name: cilium-operator
   name: cilium-operator
   namespace: kube-system
 spec:
-  replicas: 1
+  # We support HA mode only for Kubernetes version > 1.14
+  # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
+  # for more details.
+  replicas: 2
   selector:
     matchLabels:
       io.cilium/app: operator
       name: cilium-operator
   strategy:
     rollingUpdate:
       maxSurge: 1
       maxUnavailable: 1
     type: RollingUpdate
   template:
     metadata:
       annotations:
-        prometheus.io/port: "6942"
-        prometheus.io/scrape: "true"
       labels:
         io.cilium/app: operator
         name: cilium-operator
     spec:
+      # In HA mode, cilium-operator pods must not be scheduled on the same
+      # node as they will clash with each other.
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: io.cilium/app
+                operator: In
+                values:
+                - operator
+            topologyKey: kubernetes.io/hostname
       containers:
       - args:
+        - --config-dir=/tmp/cilium/config-map
         - --debug=$(CILIUM_DEBUG)
-        - --identity-allocation-mode=$(CILIUM_IDENTITY_ALLOCATION_MODE)
-        - --enable-metrics
-        - --synchronize-k8s-nodes=true
         command:
-        - cilium-operator
+        - cilium-operator-generic
         env:
+        - name: K8S_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
         - name: CILIUM_K8S_NAMESPACE
           valueFrom:
             fieldRef:
               apiVersion: v1
               fieldPath: metadata.namespace
-        - name: K8S_NODE_NAME
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: spec.nodeName
         - name: CILIUM_DEBUG
           valueFrom:
             configMapKeyRef:
               key: debug
               name: cilium-config
               optional: true
-        - name: CILIUM_CLUSTER_NAME
-          valueFrom:
-            configMapKeyRef:
-              key: cluster-name
-              name: cilium-config
-              optional: true
-        - name: CILIUM_CLUSTER_ID
-          valueFrom:
-            configMapKeyRef:
-              key: cluster-id
-              name: cilium-config
-              optional: true
-        - name: CILIUM_IPAM
-          valueFrom:
-            configMapKeyRef:
-              key: ipam
-              name: cilium-config
-              optional: true
-        - name: CILIUM_DISABLE_ENDPOINT_CRD
-          valueFrom:
-            configMapKeyRef:
-              key: disable-endpoint-crd
-              name: cilium-config
-              optional: true
-        - name: CILIUM_KVSTORE
-          valueFrom:
-            configMapKeyRef:
-              key: kvstore
-              name: cilium-config
-              optional: true
-        - name: CILIUM_KVSTORE_OPT
-          valueFrom:
-            configMapKeyRef:
-              key: kvstore-opt
-              name: cilium-config
-              optional: true
-        - name: AWS_ACCESS_KEY_ID
-          valueFrom:
-            secretKeyRef:
-              key: AWS_ACCESS_KEY_ID
-              name: cilium-aws
-              optional: true
-        - name: AWS_SECRET_ACCESS_KEY
-          valueFrom:
-            secretKeyRef:
-              key: AWS_SECRET_ACCESS_KEY
-              name: cilium-aws
-              optional: true
-        - name: AWS_DEFAULT_REGION
-          valueFrom:
-            secretKeyRef:
-              key: AWS_DEFAULT_REGION
-              name: cilium-aws
-              optional: true
-        - name: CILIUM_IDENTITY_ALLOCATION_MODE
-          valueFrom:
-            configMapKeyRef:
-              key: identity-allocation-mode
-              name: cilium-config
-              optional: true
-        image: "docker.io/cilium/operator:v1.7.1"
+        image: quay.io/cilium/operator-generic:v1.9.0
         imagePullPolicy: IfNotPresent
         name: cilium-operator
-        ports:
-        - containerPort: 6942
-          hostPort: 6942
-          name: prometheus
-          protocol: TCP
         livenessProbe:
           httpGet:
-            host: "127.0.0.1"
+            host: '127.0.0.1'
             path: /healthz
             port: 9234
             scheme: HTTP
           initialDelaySeconds: 60
           periodSeconds: 10
           timeoutSeconds: 3
+        volumeMounts:
+        - mountPath: /tmp/cilium/config-map
+          name: cilium-config-path
+          readOnly: true
       hostNetwork: true
       restartPolicy: Always
+      priorityClassName: system-cluster-critical
       serviceAccount: cilium-operator
       serviceAccountName: cilium-operator
+      tolerations:
+      - operator: Exists
+      volumes:
+      # To read the configuration from the config map
+      - configMap:
+          name: cilium-config
+        name: cilium-config-path
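A quick check that both operator replicas are scheduled after the upgrade; note that the required anti-affinity above means the second replica stays Pending on a single-node cluster:

  $ kubectl -n kube-system get deployment cilium-operator
  $ kubectl -n kube-system get pods -l io.cilium/app=operator -o wide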