From 459fde51dfba585e0b532a695c1ca7b021fdff92 Mon Sep 17 00:00:00 2001
From: ilia1243 <8808144+ilia1243@users.noreply.github.com>
Date: Thu, 30 Mar 2023 18:50:11 +0300
Subject: [PATCH] MANOPD-86292 Remove flannel and haproxy-ingress-controller
plugins (#393)
* Completely remove flannel and haproxy-ingress-controller plugins
* Remove the calico enrichment that is no longer necessary
---
documentation/Installation.md | 138 +---
examples/cluster.yaml/full-cluster.yaml | 19 -
kubemarine.spec | 1 -
kubemarine/admission.py | 3 +-
kubemarine/core/defaults.py | 1 -
kubemarine/plugins/calico.py | 17 -
kubemarine/plugins/haproxy_ingress.py | 28 -
.../resources/configurations/defaults.yaml | 37 +-
.../schemas/definitions/plugins.json | 6 -
.../schemas/definitions/plugins/flannel.json | 19 -
.../plugins/haproxy-ingress-controller.json | 50 --
kubemarine/templates/plugins/flannel.yaml.j2 | 605 ------------------
.../haproxy-ingress-controller.yaml.j2 | 234 -------
13 files changed, 10 insertions(+), 1148 deletions(-)
delete mode 100755 kubemarine/plugins/haproxy_ingress.py
delete mode 100644 kubemarine/resources/schemas/definitions/plugins/flannel.json
delete mode 100644 kubemarine/resources/schemas/definitions/plugins/haproxy-ingress-controller.json
delete mode 100644 kubemarine/templates/plugins/flannel.yaml.j2
delete mode 100644 kubemarine/templates/plugins/haproxy-ingress-controller.yaml.j2
diff --git a/documentation/Installation.md b/documentation/Installation.md
index 7cd1234cf..d15122c95 100644
--- a/documentation/Installation.md
+++ b/documentation/Installation.md
@@ -68,9 +68,7 @@ This section provides information about the inventory, features, and steps for i
- [Plugins](#plugins)
- [Predefined Plugins](#predefined-plugins)
- [calico](#calico)
- - [flannel](#flannel)
- [nginx-ingress-controller](#nginx-ingress-controller)
- - [haproxy-ingress-controller](#haproxy-ingress-controller)
- [kubernetes-dashboard](#kubernetes-dashboard)
- [local-path-provisioner](#local-path-provisioner)
- [Plugins Features](#plugins-features)
@@ -468,7 +466,7 @@ services:
#### Plugins Deployment with Node Taints
-The plugins also require the tolerations section in case of node taints. The Calico and Flannel pods already have tolerations to be assigned to all the cluster nodes. But for other plugins, it should be set in cluster.yaml. For more information, see [Tolerations](#tolerations).
+The plugins also require a tolerations section if the cluster nodes have taints. The Calico pods already have tolerations that allow them to be scheduled on all the cluster nodes, but for other plugins, tolerations should be set in cluster.yaml. For more information, see [Tolerations](#tolerations).
If you create your own plugins, the tolerations settings should be taken into account.
@@ -3347,7 +3345,7 @@ There are three parts of PSS configuration.
* default profile is described in the `defaults` section and `enforce` defines the policy standard that enforces the pods
* `exemptions` describes exemptions from default rules
-The PSS enabling requires special labels for plugin namespaces such as `nginx-ingress-controller`, `haproxy-ingress-controller`, `kubernetes-dashboard`, and `local-path-provisioner`. For instance:
+Enabling PSS requires special labels for plugin namespaces such as `nginx-ingress-controller`, `kubernetes-dashboard`, and `local-path-provisioner`. For instance:
```yaml
apiVersion: v1
@@ -3462,10 +3460,8 @@ When you want to install a plugin, the installer includes pre-configured plug-in
* Network plugins
* [calico](#calico)
- * [flannel](#flannel)
* Ingress Controllers
* [nginx-ingress-controller](#nginx-ingress-controller)
- * [haproxy-ingress-controller](#haproxy-ingress-controller)
* [kubernetes-dashboard](#kubernetes-dashboard)
* [local-path-provisioner](#local-path-provisioner)
@@ -3651,44 +3647,6 @@ plugins:
For more information about the supported Calico environment variables, refer to the official Calico documentation at [https://docs.projectcalico.org/reference/node/configuration](https://docs.projectcalico.org/reference/node/configuration).
-##### flannel
-
-Before proceeding, refer to the [Official Documentation of the Kubernetes Cluster Network](https://kubernetes.io/docs/concepts/cluster-administration/networking/).
-
-**Warning**: This plugin is experimental. It is not recommended to use it in production.
-
-Flannel plugin is not installed by default. However, it is possible to explicitly enable or disable the installation of this plugin through the `install` plugin parameter.
-
-The following is an example to enable the plugin:
-
-```yaml
-plugins:
- flannel:
- install: true
-```
-
-If you explicitly enable Flannel plugin and do not enable Calico plugin, then only Flannel plugin is installed, and Calico plugin is not installed by default.
-
-After applying the plugin configurations, the plugin installation procedure waits for the following pods to be in the `Running` state:
-* coredns
-* kube-flannel-ds-amd64
-
-If the pods do not have time to start at a specific timeout, then the plugin configuration is incorrect. In this case, the installation is aborted.
-
-By default, no additional settings are required for the plugin. However, you can change the default settings. To do this, in the `plugins` section of the config file, specify the `flannel` plugin section and list all the necessary parameters and their values in it.
-For example:
-
-```yaml
-plugins:
- flannel:
- install: true
- image: quay.io/coreos/flannel:v0.11.0-amd64
-```
-
-An example is also available in [Full Inventory Example](../examples/cluster.yaml/full-cluster.yaml).
-
-The plugin configuration supports the `image` parameter. The `image` parameter specifies the string for the Flannel image. The default value is `quay.io/coreos/flannel:v0.11.0-amd64`.
-
##### nginx-ingress-controller
Before proceeding, refer to the [Official Documentation of the Kubernetes Ingress Controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) and visit [official Nginx Ingress Controller repository](https://github.com/nginxinc/kubernetes-ingress).
@@ -3789,50 +3747,6 @@ For example:
###### monitoring
By default 10254 port is opened and provides Prometheus metrics.
-##### haproxy-ingress-controller
-
-Before proceeding, refer to the [Official Documentation of the Kubernetes Ingress Controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) and visit [official HAProxy Ingress Controller repository](https://github.com/haproxytech/kubernetes-ingress).
-
-**Warning**: This plugin is experimental. It is not recommended to use it in production.
-
-HAProxy Ingress Controller plugin is not installed by default. However, you can explicitly enable or disable the installation of this plugin through the `install` plugin parameter.
-
-The following is an example to enable the plugin:
-
-```yaml
-plugins:
- haproxy-ingress-controller:
- install: true
-```
-
-If you explicitly enable HAProxy Ingress Controller plugin, but do not enable NGINX Ingress Controller plugin, then only HAProxy plugin is installed, and NGINX plugin is not installed by default.
-
-After applying the plugin configurations, the plugin installation procedure waits for `haproxy-ingress` pod to be in the `Running` state.
-
-If the pods do not have time to start at a specific timeout, then the plugin configuration is incorrect. In this case, the installation is aborted.
-
-By default, no additional settings are required for the plugin. However, you can change the default settings. To do this, in the `plugins` section of the config file, specify the `haproxy-ingress-controller` plugin section and list all the necessary parameters and their values in it.
-For example:
-
-```yaml
-plugins:
- flannel:
- install: true
- controller:
- image: haproxytech/kubernetes-ingress:1.2.7
- backend:
- image: k8s.gcr.io/defaultbackend:1.0
-```
-
-An example is also available in [Full Inventory Example](../examples/cluster.yaml/full-cluster.yaml).
-
-The plugin configuration supports the following parameters:
-
-|Name|Type|Default Value|Value Rules|Description|
-|---|---|---|---|---|
-|controller.image|string|`haproxytech/kubernetes-ingress:1.2.7`| |HAProxy Ingress Controller image|
-|backend.image|string|`k8s.gcr.io/defaultbackend:1.0`| |Default Backend image for HAProxy Ingress Controller|
-
##### kubernetes-dashboard
Before proceeding, refer to the [Official Documentation of the Kubernetes Dashboard UI](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) and visit [official Kubernetes Dashboard repository](https://github.com/kubernetes/dashboard).
@@ -4064,10 +3978,9 @@ Plugins are installed in a strict sequential order. The installation sequence is
|Plugin|Priority|
|---|---|
|calico|`0`|
-|flannel|`0`|
|nginx-ingress-controller|`1`|
-|haproxy-ingress-controller|`1`|
|kubernetes-dashboard|`2`|
+|local-path-provisioner|`2`|
You can change the priorities of preinstalled plugins, as well as set your own priority for the custom plugins.
The following is an example of how to prioritize a plugin:
@@ -4101,12 +4014,6 @@ The following table contains details about existing nodeSelector configuration o
kubernetes.io/os: linux |
nodeSelector applicable only for calico typha and calico kube-controllers containers, but not for ordinary calico containers, which should be deployed on all nodes |
-  <tr>
-    <td>flannel</td>
-    <td>-</td>
-    <td>-</td>
-    <td>It is not possible to configure nodeSelector for flannel since flannel containers should run on all nodes</td>
-  </tr>
nginx-ingress-controller |
controller.nodeSelector |
@@ -4115,17 +4022,6 @@ The following table contains details about existing nodeSelector configuration o
|
-  <tr>
-    <td>haproxy-ingress-controller</td>
-    <td>controller.nodeSelector<br>backend.nodeSelector</td>
-    <td>kubernetes.io/os: linux</td>
-    <td></td>
-  </tr>
kubernetes-dashboard |
@@ -4177,30 +4073,12 @@ The following table contains details about existing tolerations configuration op
|
tolerations are not configurable for network plugins |
-  <tr>
-    <td>flannel</td>
-    <td>-</td>
-    <td>- effect: NoSchedule<br>  operator: Exists</td>
-    <td>tolerations are not configurable for network plugins</td>
-  </tr>
nginx-ingress-controller |
|
none |
|
-  <tr>
-    <td>haproxy-ingress-controller</td>
-    <td>controller.tolerations<br>backend.tolerations</td>
-    <td>node</td>
-    <td></td>
-  </tr>
kubernetes-dashboard |
@@ -4502,15 +4380,15 @@ For example:
```yaml
plugins:
- haproxy-ingress-controller:
+ nginx-ingress-controller:
installation:
procedures:
- python:
- module: /var/data/plugins/ingress_controller.py
- method: override_priviledged_ports
+ module: plugins/builtin.py
+ method: apply_yaml
arguments:
- service: haproxy-ingress
- namespace: haproxy-controller
+ plugin_name: nginx-ingress-controller
+ original_yaml_path: plugins/yaml/nginx-ingress-controller-{{ plugins.nginx-ingress-controller.version }}-original.yaml
```
##### thirdparty
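Editor's note on the example above: the contract for custom `python` procedures can still be inferred from the removed `haproxy_ingress.py` later in this patch, where Kubemarine calls a module-level function with the cluster object plus the keyword arguments listed under `arguments`. The following is a minimal sketch of such a module under that assumption; the function name, the annotation key, and the `kubectl annotate` step are hypothetical illustrations, not Kubemarine API.

```python
# Minimal sketch of a custom module for the `python` plugin procedure.
# Only the calling convention (cluster object + keyword arguments taken
# from `arguments`) is grounded in this patch; the rest is hypothetical.

def annotate_service(cluster, service=None, namespace=None):
    # `cluster` is the cluster object passed by the plugin runner; the
    # remaining keyword arguments mirror the `arguments` mapping.
    cluster.log.debug('Annotating service %s in namespace %s...' % (service, namespace))
    first_control_plane = cluster.nodes['control-plane'].get_first_member()
    # Run an idempotent kubectl command on the first control plane node.
    first_control_plane.sudo(
        'kubectl annotate svc %s -n %s example.com/managed-by=kubemarine --overwrite'
        % (service, namespace),
        hide=False)
```

Such a module would be referenced from cluster.yaml via the same `module`/`method`/`arguments` keys as the `plugins/builtin.py` example above.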
diff --git a/examples/cluster.yaml/full-cluster.yaml b/examples/cluster.yaml/full-cluster.yaml
index c2314226b..28666246e 100644
--- a/examples/cluster.yaml/full-cluster.yaml
+++ b/examples/cluster.yaml/full-cluster.yaml
@@ -382,12 +382,6 @@ plugins:
flexvol:
image: calico/pod2daemon-flexvol:v3.10.1
- flannel:
- install: false
- installation:
- priority: 0
- image: quay.io/coreos/flannel:v0.11.0-amd64
-
nginx-ingress-controller:
install: true
installation:
@@ -403,19 +397,6 @@ plugins:
nodeSelector:
kubernetes.io/os: linux
- haproxy-ingress-controller:
- install: false
- installation:
- priority: 1
- controller:
- image: haproxytech/kubernetes-ingress:1.2.7
- nodeSelector:
- kubernetes.io/os: linux
- backend:
- image: k8s.gcr.io/defaultbackend:1.0
- nodeSelector:
- kubernetes.io/os: linux
-
kubernetes-dashboard:
install: true
installation:
diff --git a/kubemarine.spec b/kubemarine.spec
index e86ab3406..f1d2cec0b 100644
--- a/kubemarine.spec
+++ b/kubemarine.spec
@@ -27,7 +27,6 @@ a = Analysis(['./kubemarine/__main__.py'],
'kubemarine.plugins.builtin',
'kubemarine.plugins.calico',
'kubemarine.plugins.nginx_ingress',
- 'kubemarine.plugins.haproxy_ingress',
'kubemarine.plugins.kubernetes_dashboard',
'kubemarine.core.schema'
],
diff --git a/kubemarine/admission.py b/kubemarine/admission.py
index b29de42ea..1be8dd49b 100644
--- a/kubemarine/admission.py
+++ b/kubemarine/admission.py
@@ -46,8 +46,7 @@
baseline_plugins = {"kubernetes-dashboard": "kubernetes-dashboard"}
privileged_plugins = {"nginx-ingress-controller": "ingress-nginx",
- "local-path-provisioner": "local-path-storage",
- "haproxy-ingress-controller": "haproxy-controller"}
+ "local-path-provisioner": "local-path-storage"}
loaded_oob_policies = {}
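Editor's note: the maps above pair each plugin with its namespace so the matching pod-security profile can be applied. A rough sketch of how such a map translates into the PSS labels shown in the documentation; the label keys and the `latest` version value come from the removed template in this patch, while the helper itself is illustrative, not Kubemarine code.

```python
# Illustrative only: build the pod-security labels for each plugin namespace.

PSS_MODES = ("enforce", "audit", "warn")

def pss_labels(profile: str) -> dict:
    labels = {}
    for mode in PSS_MODES:
        labels["pod-security.kubernetes.io/%s" % mode] = profile
        labels["pod-security.kubernetes.io/%s-version" % mode] = "latest"
    return labels

privileged_plugins = {"nginx-ingress-controller": "ingress-nginx",
                      "local-path-provisioner": "local-path-storage"}

for plugin, namespace in privileged_plugins.items():
    print(namespace, pss_labels("privileged"))
```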
diff --git a/kubemarine/core/defaults.py b/kubemarine/core/defaults.py
index 28c65ef29..46d2e78a5 100755
--- a/kubemarine/core/defaults.py
+++ b/kubemarine/core/defaults.py
@@ -52,7 +52,6 @@
"kubemarine.kubernetes.enrich_inventory",
"kubemarine.admission.enrich_inventory",
"kubemarine.kubernetes_accounts.enrich_inventory",
- "kubemarine.plugins.calico.enrich_inventory",
"kubemarine.plugins.nginx_ingress.cert_renew_enrichment",
"kubemarine.plugins.nginx_ingress.enrich_inventory",
"kubemarine.core.defaults.calculate_nodegroups",
diff --git a/kubemarine/plugins/calico.py b/kubemarine/plugins/calico.py
index 85b1a572b..026612d3f 100755
--- a/kubemarine/plugins/calico.py
+++ b/kubemarine/plugins/calico.py
@@ -21,23 +21,6 @@
from kubemarine.plugins.manifest import Processor, EnrichmentFunction, Manifest
-def enrich_inventory(inventory, cluster):
-
- # By default, we use calico, but have to find it out
- # First of all we have to check is Calicon set to be installed or not
- # By default installation parameter is unset, means user did not make any decision
- if inventory["plugins"]["calico"].get("install") is None:
- # Is user defined Flannel plugin and set it to install?
- flannel_required = inventory["plugins"].get("flannel", {}).get("install", False)
- # Is user defined Canal plugin and set it to install?
- canal_required = inventory["plugins"].get("canal", {}).get("install", False)
- # If Flannel and Canal is unset or not required to install, then install Calico
- if not flannel_required and not canal_required:
- inventory["plugins"]["calico"]["install"] = True
-
- return inventory
-
-
# DEPRECATED
def apply_calico_yaml(cluster: KubernetesCluster, calico_original_yaml: str, calico_yaml: str):
"""
diff --git a/kubemarine/plugins/haproxy_ingress.py b/kubemarine/plugins/haproxy_ingress.py
deleted file mode 100755
index 990003243..000000000
--- a/kubemarine/plugins/haproxy_ingress.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2021-2022 NetCracker Technology Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import time
-
-
-def override_priviledged_ports(cluster, service=None, namespace=None):
- cluster.log.debug('Unlocking privileged ports...')
- control_planes = cluster.nodes['control-plane']
- control_planes.sudo('sed \'/- kube-apiserver/a\ - --service-node-port-range=80-32000\' -i /etc/kubernetes/manifests/kube-apiserver.yaml', hide=False)
- control_planes.sudo('systemctl restart kubelet.service', hide=False)
- # TODO: Get rid of hardcoded timeout - Wait for service start on all nodes
- time.sleep(60)
- control_planes.get_first_member().sudo('kubectl patch svc %s -n %s -p \'[ { "op": "replace", "path": "/spec/ports/1/nodePort", "value": 443 }, { "op": "replace", "path": "/spec/ports/0/nodePort", "value": 80 } ]\' --type=\'json\'' % (service, namespace), hide=False)
- control_planes.sudo('sed \'/service-node-port-range=.*/d\' -i /etc/kubernetes/manifests/kube-apiserver.yaml', hide=False)
- control_planes.sudo('systemctl restart kubelet.service', hide=False)
- time.sleep(60)
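Editor's note: the deleted module's TODO called out these fixed 60-second sleeps. For comparison, a readiness poll avoids the guesswork. The sketch below assumes a fabric-style executor whose `sudo(..., warn=True)` returns a result with an `ok` attribute; that interface is an assumption for illustration, not the Kubemarine NodeGroup API.

```python
import time

def wait_for_apiserver(executor, timeout=120, interval=5):
    # Poll the kube-apiserver readiness endpoint instead of sleeping blindly.
    # `executor` is assumed to expose sudo(cmd, warn=True) -> result with .ok.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        result = executor.sudo('kubectl get --raw=/readyz', warn=True)
        if result.ok:
            return
        time.sleep(interval)
    raise TimeoutError('kube-apiserver did not become ready within %s seconds' % timeout)
```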
diff --git a/kubemarine/resources/configurations/defaults.yaml b/kubemarine/resources/configurations/defaults.yaml
index 0c7d50ebd..c7fd2ab22 100644
--- a/kubemarine/resources/configurations/defaults.yaml
+++ b/kubemarine/resources/configurations/defaults.yaml
@@ -412,6 +412,7 @@ plugins:
calico:
version: '{{ globals.compatibility_map.software["calico"][services.kubeadm.kubernetesVersion|minorversion].version }}'
+ install: true
installation:
priority: 0
procedures:
@@ -527,17 +528,6 @@ plugins:
flexvol:
image: 'calico/pod2daemon-flexvol:{{ plugins.calico.version }}'
- flannel:
- installation:
- priority: 0
- procedures:
- - template: templates/plugins/flannel.yaml.j2
- - expect:
- pods:
- - coredns
- - kube-flannel-ds-amd64
- image: quay.io/coreos/flannel:v0.11.0-amd64
-
nginx-ingress-controller:
version: '{{ globals.compatibility_map.software["nginx-ingress-controller"][services.kubeadm.kubernetesVersion|minorversion].version }}'
install: true
@@ -583,31 +573,6 @@ plugins:
containerPort: 8443
protocol: TCP
- # TODO: support hostPort for haproxy-ingress
- haproxy-ingress-controller:
- install: false
- installation:
- priority: 1
- procedures:
- - template: templates/plugins/haproxy-ingress-controller.yaml.j2
- - expect:
- pods:
- - haproxy-ingress
- - python:
- module: plugins/haproxy_ingress.py
- method: override_priviledged_ports
- arguments:
- service: haproxy-ingress
- namespace: haproxy-controller
- controller:
- image: haproxytech/kubernetes-ingress:1.2.7
- nodeSelector:
- kubernetes.io/os: linux
- backend:
- image: k8s.gcr.io/defaultbackend:1.0
- nodeSelector:
- kubernetes.io/os: linux
-
kubernetes-dashboard:
version: '{{ globals.compatibility_map.software["kubernetes-dashboard"][services.kubeadm.kubernetesVersion|minorversion].version }}'
install: false
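Editor's note: adding `install: true` to calico's defaults is what makes the removed `enrich_inventory` logic unnecessary. With flannel and canal gone, a plain defaults-then-overrides merge produces the same outcome the enrichment used to compute. A toy illustration of that equivalence (not Kubemarine's actual merge code):

```python
def effective_install(defaults: dict, user_section: dict) -> bool:
    # Defaults apply first; anything the user sets in cluster.yaml wins.
    merged = {**defaults, **user_section}
    return merged["install"]

# calico is installed unless the user explicitly opts out.
assert effective_install({"install": True}, {}) is True
assert effective_install({"install": True}, {"install": False}) is False
```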
diff --git a/kubemarine/resources/schemas/definitions/plugins.json b/kubemarine/resources/schemas/definitions/plugins.json
index 8522c73be..21a73cdd6 100644
--- a/kubemarine/resources/schemas/definitions/plugins.json
+++ b/kubemarine/resources/schemas/definitions/plugins.json
@@ -6,15 +6,9 @@
"calico": {
"$ref": "plugins/calico.json"
},
- "flannel": {
- "$ref": "plugins/flannel.json"
- },
"nginx-ingress-controller": {
"$ref": "plugins/nginx-ingress-controller.json"
},
- "haproxy-ingress-controller": {
- "$ref": "plugins/haproxy-ingress-controller.json"
- },
"kubernetes-dashboard": {
"$ref": "plugins/kubernetes-dashboard.json"
},
diff --git a/kubemarine/resources/schemas/definitions/plugins/flannel.json b/kubemarine/resources/schemas/definitions/plugins/flannel.json
deleted file mode 100644
index 25f673b68..000000000
--- a/kubemarine/resources/schemas/definitions/plugins/flannel.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-07/schema",
- "type": "object",
- "description": "Enable and/or change default settings for experimental 'flannel' plugin",
- "allOf": [{"$ref": "generic_plugin.json#/definitions/Properties"}],
- "properties": {
- "image": {
- "type": "string",
- "default": "quay.io/coreos/flannel:v0.11.0-amd64",
- "description": "Specifies the string for the Flannel image"
- }
- },
- "propertyNames": {
- "anyOf": [
- {"$ref": "generic_plugin.json#/definitions/PropertyNames"},
- {"enum": ["image"]}
- ]
- }
-}
diff --git a/kubemarine/resources/schemas/definitions/plugins/haproxy-ingress-controller.json b/kubemarine/resources/schemas/definitions/plugins/haproxy-ingress-controller.json
deleted file mode 100644
index 72469dbb6..000000000
--- a/kubemarine/resources/schemas/definitions/plugins/haproxy-ingress-controller.json
+++ /dev/null
@@ -1,50 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-07/schema",
- "type": "object",
- "description": "Enable and/or change default settings for experimental 'haproxy-ingress-controller' plugin",
- "allOf": [{"$ref": "generic_plugin.json#/definitions/Properties"}],
- "properties": {
- "controller": {
- "type": "object",
- "description": "HAProxy Ingress Controller configuration",
- "properties": {
- "image": {
- "type": "string",
- "default": "haproxytech/kubernetes-ingress:1.2.7",
- "description": "HAProxy Ingress Controller image"
- },
- "nodeSelector": {
- "$ref": "generic_plugin.json#/definitions/CommonNodeSelector"
- },
- "tolerations": {
- "$ref": "generic_plugin.json#/definitions/CustomTolerations"
- }
- },
- "additionalProperties": false
- },
- "backend": {
- "type": "object",
- "description": "HAProxy Ingress Default Backend configuration",
- "properties": {
- "image": {
- "type": "string",
- "default": "k8s.gcr.io/defaultbackend:1.0",
- "description": "Default Backend image for HAProxy Ingress Controller"
- },
- "nodeSelector": {
- "$ref": "generic_plugin.json#/definitions/CommonNodeSelector"
- },
- "tolerations": {
- "$ref": "generic_plugin.json#/definitions/CustomTolerations"
- }
- },
- "additionalProperties": false
- }
- },
- "propertyNames": {
- "anyOf": [
- {"$ref": "generic_plugin.json#/definitions/PropertyNames"},
- {"enum": ["controller", "backend"]}
- ]
- }
-}
diff --git a/kubemarine/templates/plugins/flannel.yaml.j2 b/kubemarine/templates/plugins/flannel.yaml.j2
deleted file mode 100644
index 1a46885d1..000000000
--- a/kubemarine/templates/plugins/flannel.yaml.j2
+++ /dev/null
@@ -1,605 +0,0 @@
-# Source: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
-
----
-
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- name: psp.flannel.unprivileged
- annotations:
- seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
- seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
- apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
- apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
-spec:
- privileged: false
- volumes:
- - configMap
- - secret
- - emptyDir
- - hostPath
- allowedHostPaths:
- - pathPrefix: "/etc/cni/net.d"
- - pathPrefix: "/etc/kube-flannel"
- - pathPrefix: "/run/flannel"
- readOnlyRootFilesystem: false
- # Users and groups
- runAsUser:
- rule: RunAsAny
- supplementalGroups:
- rule: RunAsAny
- fsGroup:
- rule: RunAsAny
- # Privilege Escalation
- allowPrivilegeEscalation: false
- defaultAllowPrivilegeEscalation: false
- # Capabilities
- allowedCapabilities: ['NET_ADMIN']
- defaultAddCapabilities: []
- requiredDropCapabilities: []
- # Host namespaces
- hostPID: false
- hostIPC: false
- hostNetwork: true
- hostPorts:
- - min: 0
- max: 65535
- # SELinux
- seLinux:
- # SELinux is unused in CaaSP
- rule: 'RunAsAny'
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: flannel
-rules:
- - apiGroups: ['extensions']
- resources: ['podsecuritypolicies']
- verbs: ['use']
- resourceNames: ['psp.flannel.unprivileged']
- - apiGroups:
- - ""
- resources:
- - pods
- verbs:
- - get
- - apiGroups:
- - ""
- resources:
- - nodes
- verbs:
- - list
- - watch
- - apiGroups:
- - ""
- resources:
- - nodes/status
- verbs:
- - patch
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: flannel
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: flannel
-subjects:
-- kind: ServiceAccount
- name: flannel
- namespace: kube-system
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: flannel
- namespace: kube-system
----
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: kube-flannel-cfg
- namespace: kube-system
- labels:
- tier: node
- app: flannel
-data:
- cni-conf.json: |
- {
- "name": "cbr0",
- "cniVersion": "0.3.1",
- "plugins": [
- {
- "type": "flannel",
- "delegate": {
- "hairpinMode": true,
- "isDefaultGateway": true
- }
- },
- {
- "type": "portmap",
- "capabilities": {
- "portMappings": true
- }
- }
- ]
- }
- net-conf.json: |
- {
- "Network": "{{ services["kubeadm"]["networking"]["podSubnet"] }}",
- "Backend": {
- "Type": "vxlan"
- }
- }
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- name: kube-flannel-ds-amd64
- namespace: kube-system
- labels:
- tier: node
- app: flannel
-spec:
- selector:
- matchLabels:
- app: flannel
- template:
- metadata:
- labels:
- tier: node
- app: flannel
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: beta.kubernetes.io/os
- operator: In
- values:
- - linux
- - key: beta.kubernetes.io/arch
- operator: In
- values:
- - amd64
- hostNetwork: true
- tolerations:
- - operator: Exists
- effect: NoSchedule
- serviceAccountName: flannel
- initContainers:
- - name: install-cni
- image: {% if plugins['flannel']['installation']['registry'] is defined and plugins['flannel']['installation']['registry']|length %}{{ plugins['flannel']['installation']['registry'] }}/{% endif %}{{ plugins['flannel']['image'] }}
- command:
- - cp
- args:
- - -f
- - /etc/kube-flannel/cni-conf.json
- - /etc/cni/net.d/10-flannel.conflist
- volumeMounts:
- - name: cni
- mountPath: /etc/cni/net.d
- - name: flannel-cfg
- mountPath: /etc/kube-flannel/
- containers:
- - name: kube-flannel
- image: {% if plugins['flannel']['installation']['registry'] is defined and plugins['flannel']['installation']['registry']|length %}{{ plugins['flannel']['installation']['registry'] }}/{% endif %}{{ plugins['flannel']['image'] }}
- command:
- - /opt/bin/flanneld
- args:
- - --ip-masq
- - --kube-subnet-mgr
- resources:
- requests:
- cpu: "100m"
- memory: "50Mi"
- limits:
- cpu: "100m"
- memory: "50Mi"
- securityContext:
- privileged: false
- capabilities:
- add: ["NET_ADMIN"]
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- volumeMounts:
- - name: run
- mountPath: /run/flannel
- - name: flannel-cfg
- mountPath: /etc/kube-flannel/
- volumes:
- - name: run
- hostPath:
- path: /run/flannel
- - name: cni
- hostPath:
- path: /etc/cni/net.d
- - name: flannel-cfg
- configMap:
- name: kube-flannel-cfg
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- name: kube-flannel-ds-arm64
- namespace: kube-system
- labels:
- tier: node
- app: flannel
-spec:
- selector:
- matchLabels:
- app: flannel
- template:
- metadata:
- labels:
- tier: node
- app: flannel
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: beta.kubernetes.io/os
- operator: In
- values:
- - linux
- - key: beta.kubernetes.io/arch
- operator: In
- values:
- - arm64
- hostNetwork: true
- tolerations:
- - operator: Exists
- effect: NoSchedule
- serviceAccountName: flannel
- initContainers:
- - name: install-cni
- image: quay.io/coreos/flannel:v0.11.0-arm64
- command:
- - cp
- args:
- - -f
- - /etc/kube-flannel/cni-conf.json
- - /etc/cni/net.d/10-flannel.conflist
- volumeMounts:
- - name: cni
- mountPath: /etc/cni/net.d
- - name: flannel-cfg
- mountPath: /etc/kube-flannel/
- containers:
- - name: kube-flannel
- image: quay.io/coreos/flannel:v0.11.0-arm64
- command:
- - /opt/bin/flanneld
- args:
- - --ip-masq
- - --kube-subnet-mgr
- resources:
- requests:
- cpu: "100m"
- memory: "50Mi"
- limits:
- cpu: "100m"
- memory: "50Mi"
- securityContext:
- privileged: false
- capabilities:
- add: ["NET_ADMIN"]
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- volumeMounts:
- - name: run
- mountPath: /run/flannel
- - name: flannel-cfg
- mountPath: /etc/kube-flannel/
- volumes:
- - name: run
- hostPath:
- path: /run/flannel
- - name: cni
- hostPath:
- path: /etc/cni/net.d
- - name: flannel-cfg
- configMap:
- name: kube-flannel-cfg
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- name: kube-flannel-ds-arm
- namespace: kube-system
- labels:
- tier: node
- app: flannel
-spec:
- selector:
- matchLabels:
- app: flannel
- template:
- metadata:
- labels:
- tier: node
- app: flannel
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: beta.kubernetes.io/os
- operator: In
- values:
- - linux
- - key: beta.kubernetes.io/arch
- operator: In
- values:
- - arm
- hostNetwork: true
- tolerations:
- - operator: Exists
- effect: NoSchedule
- serviceAccountName: flannel
- initContainers:
- - name: install-cni
- image: quay.io/coreos/flannel:v0.11.0-arm
- command:
- - cp
- args:
- - -f
- - /etc/kube-flannel/cni-conf.json
- - /etc/cni/net.d/10-flannel.conflist
- volumeMounts:
- - name: cni
- mountPath: /etc/cni/net.d
- - name: flannel-cfg
- mountPath: /etc/kube-flannel/
- containers:
- - name: kube-flannel
- image: quay.io/coreos/flannel:v0.11.0-arm
- command:
- - /opt/bin/flanneld
- args:
- - --ip-masq
- - --kube-subnet-mgr
- resources:
- requests:
- cpu: "100m"
- memory: "50Mi"
- limits:
- cpu: "100m"
- memory: "50Mi"
- securityContext:
- privileged: false
- capabilities:
- add: ["NET_ADMIN"]
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- volumeMounts:
- - name: run
- mountPath: /run/flannel
- - name: flannel-cfg
- mountPath: /etc/kube-flannel/
- volumes:
- - name: run
- hostPath:
- path: /run/flannel
- - name: cni
- hostPath:
- path: /etc/cni/net.d
- - name: flannel-cfg
- configMap:
- name: kube-flannel-cfg
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- name: kube-flannel-ds-ppc64le
- namespace: kube-system
- labels:
- tier: node
- app: flannel
-spec:
- selector:
- matchLabels:
- app: flannel
- template:
- metadata:
- labels:
- tier: node
- app: flannel
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: beta.kubernetes.io/os
- operator: In
- values:
- - linux
- - key: beta.kubernetes.io/arch
- operator: In
- values:
- - ppc64le
- hostNetwork: true
- tolerations:
- - operator: Exists
- effect: NoSchedule
- serviceAccountName: flannel
- initContainers:
- - name: install-cni
- image: quay.io/coreos/flannel:v0.11.0-ppc64le
- command:
- - cp
- args:
- - -f
- - /etc/kube-flannel/cni-conf.json
- - /etc/cni/net.d/10-flannel.conflist
- volumeMounts:
- - name: cni
- mountPath: /etc/cni/net.d
- - name: flannel-cfg
- mountPath: /etc/kube-flannel/
- containers:
- - name: kube-flannel
- image: quay.io/coreos/flannel:v0.11.0-ppc64le
- command:
- - /opt/bin/flanneld
- args:
- - --ip-masq
- - --kube-subnet-mgr
- resources:
- requests:
- cpu: "100m"
- memory: "50Mi"
- limits:
- cpu: "100m"
- memory: "50Mi"
- securityContext:
- privileged: false
- capabilities:
- add: ["NET_ADMIN"]
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- volumeMounts:
- - name: run
- mountPath: /run/flannel
- - name: flannel-cfg
- mountPath: /etc/kube-flannel/
- volumes:
- - name: run
- hostPath:
- path: /run/flannel
- - name: cni
- hostPath:
- path: /etc/cni/net.d
- - name: flannel-cfg
- configMap:
- name: kube-flannel-cfg
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- name: kube-flannel-ds-s390x
- namespace: kube-system
- labels:
- tier: node
- app: flannel
-spec:
- selector:
- matchLabels:
- app: flannel
- template:
- metadata:
- labels:
- tier: node
- app: flannel
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: beta.kubernetes.io/os
- operator: In
- values:
- - linux
- - key: beta.kubernetes.io/arch
- operator: In
- values:
- - s390x
- hostNetwork: true
- tolerations:
- - operator: Exists
- effect: NoSchedule
- serviceAccountName: flannel
- initContainers:
- - name: install-cni
- image: quay.io/coreos/flannel:v0.11.0-s390x
- command:
- - cp
- args:
- - -f
- - /etc/kube-flannel/cni-conf.json
- - /etc/cni/net.d/10-flannel.conflist
- volumeMounts:
- - name: cni
- mountPath: /etc/cni/net.d
- - name: flannel-cfg
- mountPath: /etc/kube-flannel/
- containers:
- - name: kube-flannel
- image: quay.io/coreos/flannel:v0.11.0-s390x
- command:
- - /opt/bin/flanneld
- args:
- - --ip-masq
- - --kube-subnet-mgr
- resources:
- requests:
- cpu: "100m"
- memory: "50Mi"
- limits:
- cpu: "100m"
- memory: "50Mi"
- securityContext:
- privileged: false
- capabilities:
- add: ["NET_ADMIN"]
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- volumeMounts:
- - name: run
- mountPath: /run/flannel
- - name: flannel-cfg
- mountPath: /etc/kube-flannel/
- volumes:
- - name: run
- hostPath:
- path: /run/flannel
- - name: cni
- hostPath:
- path: /etc/cni/net.d
- - name: flannel-cfg
- configMap:
- name: kube-flannel-cfg
\ No newline at end of file
diff --git a/kubemarine/templates/plugins/haproxy-ingress-controller.yaml.j2 b/kubemarine/templates/plugins/haproxy-ingress-controller.yaml.j2
deleted file mode 100644
index f16720e1d..000000000
--- a/kubemarine/templates/plugins/haproxy-ingress-controller.yaml.j2
+++ /dev/null
@@ -1,234 +0,0 @@
-# Source: https://github.com/haproxytech/kubernetes-ingress/blob/master/deploy/haproxy-ingress.yaml
----
-apiVersion: v1
-kind: Namespace
-metadata:
- name: haproxy-controller
- {% if rbac['admission'] == "pss" and rbac["pss"]["pod-security"] == "enabled" and rbac["pss"]["defaults"]["enforce"] != "privileged" %}
- labels:
- pod-security.kubernetes.io/enforce: privileged
- pod-security.kubernetes.io/enforce-version: latest
- pod-security.kubernetes.io/audit: privileged
- pod-security.kubernetes.io/audit-version: latest
- pod-security.kubernetes.io/warn: privileged
- pod-security.kubernetes.io/warn-version: latest
- {% endif %}
-
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: haproxy-ingress-service-account
- namespace: haproxy-controller
-
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: haproxy-ingress-cluster-role
-rules:
-- apiGroups:
- - ""
- resources:
- - configmaps
- - endpoints
- - nodes
- - pods
- - services
- - namespaces
- - events
- - serviceaccounts
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - "extensions"
- resources:
- - ingresses
- - ingresses/status
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - ""
- resources:
- - secrets
- verbs:
- - get
- - list
- - watch
- - create
- - patch
- - update
-- apiGroups:
- - extensions
- resources:
- - ingresses
- verbs:
- - get
- - list
- - watch
-# haproxy requires host network access
-- apiGroups: ['extensions']
- resources: ['podsecuritypolicies']
- verbs: ['use']
- resourceNames: ['oob-host-network-psp']
-# haproxy requires root privileges
-- apiGroups: ['extensions']
- resources: ['podsecuritypolicies']
- verbs: ['use']
- resourceNames: ['oob-anyuid-psp']
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: haproxy-ingress-cluster-role-binding
- namespace: haproxy-controller
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: haproxy-ingress-cluster-role
-subjects:
-- kind: ServiceAccount
- name: haproxy-ingress-service-account
- namespace: haproxy-controller
-
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: haproxy-configmap
- namespace: default
-data:
-
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- labels:
- run: ingress-default-backend
- name: ingress-default-backend
- namespace: haproxy-controller
-spec:
- replicas: 1
- selector:
- matchLabels:
- run: ingress-default-backend
- template:
- metadata:
- labels:
- run: ingress-default-backend
- spec:
- serviceAccountName: haproxy-ingress-service-account
- nodeSelector:
- {{ plugins['haproxy-ingress-controller']['backend']['nodeSelector'] | toyaml | indent(width=8, first=False) }}
- {% if plugins['haproxy-ingress-controller']['backend']['tolerations'] is defined -%}
- tolerations:
- {{ plugins['haproxy-ingress-controller']['backend']['tolerations'] | toyaml | indent(width=8, first=False) -}}
- {%- endif %}
- containers:
- - name: ingress-default-backend
- image: {% if plugins['haproxy-ingress-controller']['installation']['registry'] is defined and plugins['haproxy-ingress-controller']['installation']['registry']|length %}{{ plugins['haproxy-ingress-controller']['installation']['registry'] }}/{% endif %}{{ plugins['haproxy-ingress-controller']['backend']['image'] }}
- ports:
- - containerPort: 8080
-
----
-apiVersion: v1
-kind: Service
-metadata:
- labels:
- run: ingress-default-backend
- name: ingress-default-backend
- namespace: haproxy-controller
-spec:
- selector:
- run: ingress-default-backend
- ports:
- - name: port-1
- port: 8080
- protocol: TCP
- targetPort: 8080
-
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- labels:
- run: haproxy-ingress
- name: haproxy-ingress
- namespace: haproxy-controller
-spec:
- replicas: 1
- selector:
- matchLabels:
- run: haproxy-ingress
- template:
- metadata:
- labels:
- run: haproxy-ingress
- spec:
- serviceAccountName: haproxy-ingress-service-account
- nodeSelector:
- {{ plugins['haproxy-ingress-controller']['controller']['nodeSelector'] | toyaml | indent(width=8, first=False) }}
- {% if plugins['haproxy-ingress-controller']['controller']['tolerations'] is defined -%}
- tolerations:
- {{ plugins['haproxy-ingress-controller']['controller']['tolerations'] | toyaml | indent(width=8, first=False) -}}
- {%- endif %}
- containers:
- - name: haproxy-ingress
- image: {% if plugins['haproxy-ingress-controller']['installation']['registry'] is defined and plugins['haproxy-ingress-controller']['installation']['registry']|length %}{{ plugins['haproxy-ingress-controller']['installation']['registry'] }}/{% endif %}{{ plugins['haproxy-ingress-controller']['controller']['image'] }}
- args:
- - --configmap=default/haproxy-configmap
- - --default-backend-service=haproxy-controller/ingress-default-backend
- resources:
- requests:
- cpu: "500m"
- memory: "50Mi"
- livenessProbe:
- httpGet:
- path: /healthz
- port: 1042
- ports:
- - name: http
- containerPort: 80
- - name: https
- containerPort: 443
- - name: stat
- containerPort: 1024
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
-
----
-apiVersion: v1
-kind: Service
-metadata:
- labels:
- run: haproxy-ingress
- name: haproxy-ingress
- namespace: haproxy-controller
-spec:
- selector:
- run: haproxy-ingress
- type: NodePort
- ports:
- - name: http
- port: 80
- protocol: TCP
- targetPort: 80
- - name: https
- port: 443
- protocol: TCP
- targetPort: 443
- - name: stat
- port: 1024
- protocol: TCP
- targetPort: 1024
|