From fed9e1bd53784edf4c495e2b3736eb937d694ec6 Mon Sep 17 00:00:00 2001 From: Max Gautier Date: Thu, 2 May 2024 15:15:43 +0200 Subject: [PATCH 1/8] Add data structure for python_pkgs --- .../kubernetes/preinstall/tasks/0040-verify-settings.yml | 9 ++++++--- roles/kubernetes/preinstall/vars/main.yml | 4 +++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml index 55dda5b8109..79e55a1b107 100644 --- a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml @@ -327,11 +327,14 @@ - name: Verify that the packages list structure is valid ansible.utils.validate: criteria: "{{ lookup('file', 'pkgs-schema.json') }}" - data: "{{ pkgs }}" + data: "{{ os_pkgs }}" -- name: Verify that the packages list is sorted +- name: Verify that package lists are sorted vars: - pkgs_lists: "{{ pkgs.keys() | list }}" + pkgs_lists: "{{ lookup('vars', item + '_pkgs').keys() | list }}" assert: that: "pkgs_lists | sort == pkgs_lists" fail_msg: "pkgs is not sorted: {{ pkgs_lists | ansible.utils.fact_diff(pkgs_lists | sort) }}" + loop: + - os + - python diff --git a/roles/kubernetes/preinstall/vars/main.yml b/roles/kubernetes/preinstall/vars/main.yml index 4b3524a54df..0162f8423ae 100644 --- a/roles/kubernetes/preinstall/vars/main.yml +++ b/roles/kubernetes/preinstall/vars/main.yml @@ -1,5 +1,5 @@ --- -pkgs: +os_pkgs: apparmor: &debian_family_base os: families: @@ -104,3 +104,5 @@ pkgs: tar: {} unzip: {} xfsprogs: {} + +python_pkgs: {} From d162b536c42bd03459c85b7d24a762d6f5384990 Mon Sep 17 00:00:00 2001 From: Max Gautier Date: Thu, 2 May 2024 15:17:45 +0200 Subject: [PATCH 2/8] Add install infra for python_pkgs Some ansible module requires specific python libraries on the hosts managed by ansible. 
In particular, the kubernetes.core.k8s module (which is a better alternative than our own custom kubernetes-sigs.kubespray.kube module) require "kubernetes". Another potential useful candidate would be python "cryptography", which would allow us to use the community.crypto collection, advantageously replacing the ad-hoc stuff we have in roles/etcd. To allow granular python packages installation (only install where needed), we reuse the infrastructure introduced in 663fcd104 (Filter packages installation by OS and by group, 2024-04-05) to also install python packages in dedicated kubespray virtualenvs (one stack for each ansible module requiring python deps, to avoid conflicts). To ensure supply chain security, we use repeatable installs (https://pip.pypa.io/en/stable/topics/repeatable-installs/) and only install wheels. --- .gitattributes | 1 + .../files/host_virtualenvs/requirements.txt | 1 + .../files/host_virtualenvs/update_venvs.sh | 6 +++ .../preinstall/tasks/0070-system-packages.yml | 51 +++++++++++++++---- .../kubespray-defaults/defaults/main/main.yml | 2 + 5 files changed, 50 insertions(+), 11 deletions(-) create mode 100644 .gitattributes create mode 100644 roles/kubernetes/preinstall/files/host_virtualenvs/requirements.txt create mode 100755 roles/kubernetes/preinstall/files/host_virtualenvs/update_venvs.sh diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000000..4c3997e3137 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +roles/kubernetes/preinstall/files/host_virtualenvs/*/requirements.txt linguist-generated=true diff --git a/roles/kubernetes/preinstall/files/host_virtualenvs/requirements.txt b/roles/kubernetes/preinstall/files/host_virtualenvs/requirements.txt new file mode 100644 index 00000000000..776f187e890 --- /dev/null +++ b/roles/kubernetes/preinstall/files/host_virtualenvs/requirements.txt @@ -0,0 +1 @@ +pip-tools==7.4.1 diff --git a/roles/kubernetes/preinstall/files/host_virtualenvs/update_venvs.sh 
b/roles/kubernetes/preinstall/files/host_virtualenvs/update_venvs.sh new file mode 100755 index 00000000000..602c42c9f39 --- /dev/null +++ b/roles/kubernetes/preinstall/files/host_virtualenvs/update_venvs.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +for venv in */requirements.in +do + pip-compile --generate-hashes -o "$(dirname $venv)/requirements.txt" "${venv}" +done diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml index 7085ffb0c49..99ce2c98828 100644 --- a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml +++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml @@ -59,12 +59,13 @@ tags: - bootstrap-os -- name: Install packages requirements +- name: Install requirements + tags: + - bootstrap-os vars: # The json_query for selecting packages name is split for readability # see files/pkgs-schema.json for the structure of `pkgs` # and the matching semantics - full_query: "[? value | (enabled == null || enabled) && ( {{ filters_os }} ) && ( {{ filters_groups }} ) ].key" filters_groups: "groups | @ == null || [? contains(`{{ group_names }}`, @)]" filters_os: "os == null || (os | ( {{ filters_family }} ) || ( {{ filters_distro }} ))" dquote: !unsafe '"' @@ -75,12 +76,40 @@ contains(not_null(versions, `[]`), '{{ ansible_distribution_version }}') || contains(not_null(releases, `[]`), '{{ ansible_distribution_release }}')" filters_family: "families && contains(families, '{{ ansible_os_family }}')" - package: - name: "{{ pkgs | dict2items | to_json|from_json | community.general.json_query(full_query) }}" - state: present - register: pkgs_task_result - until: pkgs_task_result is succeeded - retries: "{{ pkg_install_retries }}" - delay: "{{ retry_stagger | random + 3 }}" - tags: - - bootstrap-os + to_install: "{{ pkgs | dict2items | to_json|from_json | community.general.json_query(full_query) }}" + block: + - name: Install system packages + vars: + full_query: "[? 
value | (enabled == null || enabled) && ( {{ filters_os }} ) && ( {{ filters_groups }} ) ].key" + pkgs: "{{ os_pkgs }}" + package: + name: "{{ to_install }}" + state: present + register: pkgs_task_result + until: pkgs_task_result is succeeded + retries: "{{ pkg_install_retries }}" + delay: "{{ retry_stagger | random + 3 }}" + - name: Copy requirements.txt + copy: + dest: "{{ kubespray_virtualenvs_base }}/{{ item }}/" + src: "host_virtualenvs/{{ item }}/requirements.txt" + mode: 0644 + loop: "{{ to_install }}" + vars: + full_query: "[? value | ( {{ filters_groups }} ) ].key" + pkgs: "{{ python_pkgs }}" + when: + - to_install | length != 0 + - name: Install virtualenv with needed packages + loop: "{{ to_install }}" + vars: + full_query: "[? value | ( {{ filters_groups }} ) ].key" + pkgs: "{{ python_pkgs }}" + when: + - to_install | length != 0 + ansible.builtin.pip: + requirements: "{{ kubespray_virtualenvs_base }}/{{ item }}/requirements.txt" + extra_args: "--only-binary :all: --no-deps --require-hashes" + virtualenv_site_packages: true + virtualenv: "{{ kubespray_virtualenvs_base }}/{{ item }}" + virtualenv_command: "{{ ansible_facts.python.executable }} -m venv" diff --git a/roles/kubespray-defaults/defaults/main/main.yml b/roles/kubespray-defaults/defaults/main/main.yml index 69dd01ea207..635aefd516f 100644 --- a/roles/kubespray-defaults/defaults/main/main.yml +++ b/roles/kubespray-defaults/defaults/main/main.yml @@ -6,6 +6,8 @@ ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='s # selinux state preinstall_selinux_state: permissive +kubespray_virtualenvs_base: "/opt/virtualenvs/kubespray" + # Setting this value to false will fail # For details, read this comment https://github.com/kubernetes-sigs/kubespray/pull/11016#issuecomment-2004985001 kube_api_anonymous_auth: true From 173a387ba06be30570088fd129032da791452165 Mon Sep 17 00:00:00 2001 From: Max Gautier Date: Mon, 13 May 2024 11:04:21 +0200 Subject: [PATCH 3/8] Update 
pre-commit-hooks Fix requirements-txt-fixer throwing on pip-compile --generate-hashes output --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c2380522a33..a67dcc4275a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 + rev: v4.5.0 hooks: - id: check-added-large-files - id: check-case-conflict From ef08aa07d411f20d6b8119781fa3475e89f8c9b4 Mon Sep 17 00:00:00 2001 From: Max Gautier Date: Wed, 6 Dec 2023 23:13:22 +0100 Subject: [PATCH 4/8] Move handling of secondary coredns into jinja Some of coredns templates depend on variables which are defined at the `template` task level. The consequence is than using the template in another way (in particular, we want to use the kubernetes.core.k8s template list feature, see following commits) is difficult. Loop inside the template rather than doing a separate task. This makes the template more self-contained and has the added benefits of deduplicating code. 
--- .../kubernetes-apps/ansible/tasks/coredns.yml | 24 ------------------- .../templates/coredns-deployment.yml.j2 | 2 ++ .../coredns-poddisruptionbudget.yml.j2 | 3 +++ .../ansible/templates/coredns-svc.yml.j2 | 4 +++- .../ansible/templates/dns-autoscaler.yml.j2 | 4 +++- roles/kubernetes-apps/ansible/vars/main.yml | 1 + 6 files changed, 12 insertions(+), 26 deletions(-) create mode 100644 roles/kubernetes-apps/ansible/vars/main.yml diff --git a/roles/kubernetes-apps/ansible/tasks/coredns.yml b/roles/kubernetes-apps/ansible/tasks/coredns.yml index 897c6189ff5..bed0b6c0d9d 100644 --- a/roles/kubernetes-apps/ansible/tasks/coredns.yml +++ b/roles/kubernetes-apps/ansible/tasks/coredns.yml @@ -17,8 +17,6 @@ - { name: coredns, file: coredns-poddisruptionbudget.yml, type: poddisruptionbudget, condition: coredns_pod_disruption_budget } - { name: dns-autoscaler, file: dns-autoscaler-sa.yml, type: sa } register: coredns_manifests - vars: - clusterIP: "{{ skydns_server }}" when: - dns_mode in ['coredns', 'coredns_dual'] - inventory_hostname == groups['kube_control_plane'][0] @@ -26,25 +24,3 @@ - item.condition | default(True) tags: - coredns - -- name: Kubernetes Apps | Lay Down Secondary CoreDNS Template - template: - src: "{{ item.src }}.j2" - dest: "{{ kube_config_dir }}/{{ item.file }}" - mode: 0644 - with_items: - - { name: coredns, src: coredns-deployment.yml, file: coredns-deployment-secondary.yml, type: deployment } - - { name: coredns, src: coredns-svc.yml, file: coredns-svc-secondary.yml, type: svc } - - { name: dns-autoscaler, src: dns-autoscaler.yml, file: coredns-autoscaler-secondary.yml, type: deployment } - - { name: coredns, src: coredns-poddisruptionbudget.yml, file: coredns-poddisruptionbudget-secondary.yml, type: poddisruptionbudget, condition: coredns_pod_disruption_budget } - register: coredns_secondary_manifests - vars: - clusterIP: "{{ skydns_server_secondary }}" - coredns_ordinal_suffix: "-secondary" - when: - - dns_mode == 'coredns_dual' - - 
inventory_hostname == groups['kube_control_plane'][0] - - enable_dns_autoscaler or item.name != 'dns-autoscaler' - - item.condition | default(True) - tags: - - coredns diff --git a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 index cbdca572759..e8778421a05 100644 --- a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 @@ -1,3 +1,4 @@ +{% for coredns_ordinal_suffix in coredns_ordinal_suffixes %} --- apiVersion: apps/v1 kind: Deployment @@ -121,3 +122,4 @@ spec: - key: hosts path: hosts {% endif %} +{% endfor %} diff --git a/roles/kubernetes-apps/ansible/templates/coredns-poddisruptionbudget.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-poddisruptionbudget.yml.j2 index 7df6b262186..1a5df80214e 100644 --- a/roles/kubernetes-apps/ansible/templates/coredns-poddisruptionbudget.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/coredns-poddisruptionbudget.yml.j2 @@ -1,3 +1,5 @@ +{% for coredns_ordinal_suffix in coredns_ordinal_suffixes %} +--- apiVersion: policy/v1 kind: PodDisruptionBudget metadata: @@ -7,3 +9,4 @@ spec: selector: matchLabels: k8s-app: kube-dns{{ coredns_ordinal_suffix }} +{% endfor %} diff --git a/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 index 961e02a6aad..31cc5496db0 100644 --- a/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 @@ -1,3 +1,4 @@ +{% for coredns_ordinal_suffix in coredns_ordinal_suffixes %} --- apiVersion: v1 kind: Service @@ -15,7 +16,7 @@ metadata: spec: selector: k8s-app: kube-dns{{ coredns_ordinal_suffix }} - clusterIP: {{ clusterIP }} + clusterIP: {{ lookup('vars', 'skydns_server' + (coredns_ordinal_suffix | replace('-', '_'))) }} ports: - name: dns port: 53 @@ -28,3 +29,4 @@ spec: - name: 
metrics port: 9153 protocol: TCP +{% endfor %} diff --git a/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 index c10ebf97f4d..b58a62a1f64 100644 --- a/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 @@ -1,4 +1,3 @@ ---- # Copyright 2016 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +{% for coredns_ordinal_suffix in coredns_ordinal_suffixes %} +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -84,3 +85,4 @@ spec: - --configmap=dns-autoscaler{{ coredns_ordinal_suffix }} - --target=Deployment/coredns{{ coredns_ordinal_suffix }} serviceAccountName: dns-autoscaler +{% endfor %} diff --git a/roles/kubernetes-apps/ansible/vars/main.yml b/roles/kubernetes-apps/ansible/vars/main.yml new file mode 100644 index 00000000000..92143ae4bc4 --- /dev/null +++ b/roles/kubernetes-apps/ansible/vars/main.yml @@ -0,0 +1 @@ +coredns_ordinal_suffixes: "{{ [''] + (dns_mode == 'coredns_dual') | ternary(['-secondary'], []) }}" From 99bf2ee5f39f37d2118065c23a6d444183e9e6c5 Mon Sep 17 00:00:00 2001 From: Max Gautier Date: Wed, 6 Dec 2023 23:59:36 +0100 Subject: [PATCH 5/8] Less vars in nodelocaldns templates Put intermediate templates vars in vars/ rather than in facts --- .../ansible/tasks/nodelocaldns.yml | 46 ------------------- .../templates/nodelocaldns-config.yml.j2 | 16 +++---- roles/kubernetes-apps/ansible/vars/main.yml | 13 ++++++ 3 files changed, 21 insertions(+), 54 deletions(-) diff --git a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml index b438afb88c4..b6de7cc4a5d 100644 --- a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml +++ b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml @@ 
-1,21 +1,4 @@ --- -- name: Kubernetes Apps | set up necessary nodelocaldns parameters - set_fact: - # noqa: jinja[spacing] - primaryClusterIP: >- - {%- if dns_mode in ['coredns', 'coredns_dual'] -%} - {{ skydns_server }} - {%- elif dns_mode == 'manual' -%} - {{ manual_dns_server }} - {%- endif -%} - secondaryclusterIP: "{{ skydns_server_secondary }}" - when: - - enable_nodelocaldns - - inventory_hostname == groups['kube_control_plane'] | first - tags: - - nodelocaldns - - coredns - - name: Kubernetes Apps | Lay Down nodelocaldns Template template: src: "{{ item.file }}.j2" @@ -26,20 +9,6 @@ - { name: nodelocaldns, file: nodelocaldns-sa.yml, type: sa } - { name: nodelocaldns, file: nodelocaldns-daemonset.yml, type: daemonset } register: nodelocaldns_manifests - vars: - # noqa: jinja[spacing] - forwardTarget: >- - {%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%} - {{ primaryClusterIP }} {{ secondaryclusterIP }} - {%- else -%} - {{ primaryClusterIP }} - {%- endif -%} - upstreamForwardTarget: >- - {%- if upstream_dns_servers is defined and upstream_dns_servers | length > 0 -%} - {{ upstream_dns_servers | join(' ') }} - {%- else -%} - /etc/resolv.conf - {%- endif -%} when: - enable_nodelocaldns - inventory_hostname == groups['kube_control_plane'] | first @@ -55,21 +24,6 @@ with_items: - { name: nodelocaldns, file: nodelocaldns-second-daemonset.yml, type: daemonset } register: nodelocaldns_second_manifests - vars: - # noqa: jinja[spacing] - forwardTarget: >- - {%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%} - {{ primaryClusterIP }} {{ secondaryclusterIP }} - {%- else -%} - {{ primaryClusterIP }} - {%- endif -%} - # noqa: jinja[spacing] - upstreamForwardTarget: >- - {%- if upstream_dns_servers is defined and upstream_dns_servers | length > 0 -%} - {{ upstream_dns_servers | join(' ') }} - {%- else -%} - /etc/resolv.conf - {%- endif -%} when: - enable_nodelocaldns - enable_nodelocaldns_secondary diff --git 
a/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 index e8ce54529c0..2cc5f385d79 100644 --- a/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 @@ -41,7 +41,7 @@ data: reload loop bind {{ nodelocaldns_ip }} - forward . {{ forwardTarget }} { + forward . {{ nodelocaldns_forward_target }} { force_tcp } prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} @@ -58,7 +58,7 @@ data: reload loop bind {{ nodelocaldns_ip }} - forward . {{ forwardTarget }} { + forward . {{ nodelocaldns_forward_target }} { force_tcp } prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} @@ -69,7 +69,7 @@ data: reload loop bind {{ nodelocaldns_ip }} - forward . {{ forwardTarget }} { + forward . {{ nodelocaldns_forward_target }} { force_tcp } prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} @@ -80,7 +80,7 @@ data: reload loop bind {{ nodelocaldns_ip }} - forward . {{ upstreamForwardTarget }}{% if dns_upstream_forward_extra_opts is defined %} { + forward . {{ nodelocaldns_upstream_forward_target }}{% if dns_upstream_forward_extra_opts is defined %} { {% for optname, optvalue in dns_upstream_forward_extra_opts.items() %} {{ optname }} {{ optvalue }} {% endfor %} @@ -123,7 +123,7 @@ data: reload loop bind {{ nodelocaldns_ip }} - forward . {{ forwardTarget }} { + forward . {{ nodelocaldns_forward_target }} { force_tcp } prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} @@ -140,7 +140,7 @@ data: reload loop bind {{ nodelocaldns_ip }} - forward . {{ forwardTarget }} { + forward . 
{{ nodelocaldns_forward_target }} { force_tcp } prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} @@ -151,7 +151,7 @@ data: reload loop bind {{ nodelocaldns_ip }} - forward . {{ forwardTarget }} { + forward . {{ nodelocaldns_forward_target }} { force_tcp } prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} @@ -162,7 +162,7 @@ data: reload loop bind {{ nodelocaldns_ip }} - forward . {{ upstreamForwardTarget }}{% if dns_upstream_forward_extra_opts is defined %} { + forward . {{ nodelocaldns_upstream_forward_target }}{% if dns_upstream_forward_extra_opts is defined %} { {% for optname, optvalue in dns_upstream_forward_extra_opts.items() %} {{ optname }} {{ optvalue }} {% endfor %} diff --git a/roles/kubernetes-apps/ansible/vars/main.yml b/roles/kubernetes-apps/ansible/vars/main.yml index 92143ae4bc4..47b9681d064 100644 --- a/roles/kubernetes-apps/ansible/vars/main.yml +++ b/roles/kubernetes-apps/ansible/vars/main.yml @@ -1 +1,14 @@ +--- +# CoreDNS vars coredns_ordinal_suffixes: "{{ [''] + (dns_mode == 'coredns_dual') | ternary(['-secondary'], []) }}" + +# Nodelocal DNS vars + +primary_dns_by_mode: + coredns: "{{ skydns_server }}" + coredns_dual: "{{ skydns_server }}" + manual: "{{ manual_dns_server }}" + none: "" + +nodelocaldns_forward_target: "{{ ([primary_dns_by_mode[dns_mode]] + (dns_mode == 'coredns_dual') | ternary([skydns_server_secondary], [])) | join(' ') }}" +nodelocaldns_upstream_forward_target: "{{ upstream_dns_servers | d([]) | join(' ') | d('/etc/resolv.conf', true) }}" From 7dcafe42af62c823e42d4d12c79d849ec64010de Mon Sep 17 00:00:00 2001 From: Max Gautier Date: Thu, 2 May 2024 16:09:36 +0200 Subject: [PATCH 6/8] Install python k8s client library on control plane nodes Kubectl client-side apply breaks coredns on upgrade because the old and the new version are not resolved correctly (see 
https://github.com/kubernetes/kubernetes/issues/39188#issuecomment-1383314789) TL;DR: the merge key for the port array is only the port number, not port+protocol. Server-side apply solves this issue, but our custom kube module is not server-side apply ready. While we could improve our custom kube module, it will be a lesser maintenance load going forward to progressively drop it and switch to kubernetes.core.k8s, which has more features and is maintained by upstream. Do that now for coredns manifests. Add the python k8s client on the control plane (no need for it elsewhere), and the python-venv on distributions which need it to create a virtualenv. --- .../kubernetes.core/requirements.in | 1 + .../kubernetes.core/requirements.txt | 223 ++++++++++++++++++ roles/kubernetes/preinstall/vars/main.yml | 11 +- 3 files changed, 234 insertions(+), 1 deletion(-) create mode 100644 roles/kubernetes/preinstall/files/host_virtualenvs/kubernetes.core/requirements.in create mode 100644 roles/kubernetes/preinstall/files/host_virtualenvs/kubernetes.core/requirements.txt diff --git a/roles/kubernetes/preinstall/files/host_virtualenvs/kubernetes.core/requirements.in b/roles/kubernetes/preinstall/files/host_virtualenvs/kubernetes.core/requirements.in new file mode 100644 index 00000000000..ed572333605 --- /dev/null +++ b/roles/kubernetes/preinstall/files/host_virtualenvs/kubernetes.core/requirements.in @@ -0,0 +1 @@ +kubernetes==29.* diff --git a/roles/kubernetes/preinstall/files/host_virtualenvs/kubernetes.core/requirements.txt b/roles/kubernetes/preinstall/files/host_virtualenvs/kubernetes.core/requirements.txt new file mode 100644 index 00000000000..1a70351d394 --- /dev/null +++ b/roles/kubernetes/preinstall/files/host_virtualenvs/kubernetes.core/requirements.txt @@ -0,0 +1,223 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --generate-hashes --output-file=kubernetes.core/requirements.txt 
kubernetes.core/requirements.in +# +cachetools==5.3.3 \ + --hash=sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945 \ + --hash=sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105 + # via google-auth +certifi==2024.2.2 \ + --hash=sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f \ + --hash=sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1 + # via + # kubernetes + # requests +charset-normalizer==3.3.2 \ + --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ + --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ + --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ + --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ + --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ + --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ + --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ + --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ + --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ + --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ + --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ + --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ + --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ + --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ + --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ + --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ + --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ + --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ + 
--hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ + --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ + --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ + --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ + --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ + --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ + --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ + --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ + --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ + --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ + --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ + --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ + --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ + --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ + --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ + --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ + --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ + --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ + --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ + --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ + --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ + --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ + --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ + --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ + 
--hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ + --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ + --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ + --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ + --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ + --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ + --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ + --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ + --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ + --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ + --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ + --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ + --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ + --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ + --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ + --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ + --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ + --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ + --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ + --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ + --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ + --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ + --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ + --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ + 
--hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ + --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ + --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ + --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ + --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ + --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ + --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ + --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ + --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ + --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ + --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ + --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ + --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ + --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ + --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ + --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ + --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ + --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ + --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ + --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ + --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ + --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ + --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ + --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 + # via 
requests +google-auth==2.29.0 \ + --hash=sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360 \ + --hash=sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415 + # via kubernetes +idna==3.7 \ + --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \ + --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0 + # via requests +kubernetes==29.0.0 \ + --hash=sha256:ab8cb0e0576ccdfb71886366efb102c6a20f268d817be065ce7f9909c631e43e \ + --hash=sha256:c4812e227ae74d07d53c88293e564e54b850452715a59a927e7e1bc6b9a60459 + # via -r kubernetes.core/requirements.in +oauthlib==3.2.2 \ + --hash=sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca \ + --hash=sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918 + # via + # kubernetes + # requests-oauthlib +pyasn1==0.6.0 \ + --hash=sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c \ + --hash=sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.0 \ + --hash=sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6 \ + --hash=sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b + # via google-auth +python-dateutil==2.9.0.post0 \ + --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ + --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 + # via kubernetes +pyyaml==6.0.1 \ + --hash=sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5 \ + --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \ + --hash=sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df \ + --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \ + --hash=sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206 \ + 
--hash=sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27 \ + --hash=sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595 \ + --hash=sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62 \ + --hash=sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98 \ + --hash=sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696 \ + --hash=sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290 \ + --hash=sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9 \ + --hash=sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d \ + --hash=sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6 \ + --hash=sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867 \ + --hash=sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47 \ + --hash=sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486 \ + --hash=sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6 \ + --hash=sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3 \ + --hash=sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007 \ + --hash=sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938 \ + --hash=sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0 \ + --hash=sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c \ + --hash=sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735 \ + --hash=sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d \ + --hash=sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28 \ + --hash=sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4 \ + --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \ + --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \ + 
--hash=sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef \ + --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \ + --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \ + --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \ + --hash=sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0 \ + --hash=sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515 \ + --hash=sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c \ + --hash=sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c \ + --hash=sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924 \ + --hash=sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34 \ + --hash=sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43 \ + --hash=sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859 \ + --hash=sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673 \ + --hash=sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54 \ + --hash=sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a \ + --hash=sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b \ + --hash=sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab \ + --hash=sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa \ + --hash=sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c \ + --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \ + --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \ + --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f + # via kubernetes +requests==2.31.0 \ + --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ + 
--hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 + # via + # kubernetes + # requests-oauthlib +requests-oauthlib==2.0.0 \ + --hash=sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36 \ + --hash=sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9 + # via kubernetes +rsa==4.9 \ + --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ + --hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21 + # via google-auth +six==1.16.0 \ + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 + # via + # kubernetes + # python-dateutil +urllib3==2.2.1 \ + --hash=sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d \ + --hash=sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19 + # via + # kubernetes + # requests +websocket-client==1.8.0 \ + --hash=sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526 \ + --hash=sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da + # via kubernetes diff --git a/roles/kubernetes/preinstall/vars/main.yml b/roles/kubernetes/preinstall/vars/main.yml index 0162f8423ae..3ef0ecfc2e0 100644 --- a/roles/kubernetes/preinstall/vars/main.yml +++ b/roles/kubernetes/preinstall/vars/main.yml @@ -98,6 +98,12 @@ os_pkgs: - "8" - "9" CentOS: *major_redhat_like + python3-venv: + os: + families: + - Debian + groups: + - kube_control_plane rsync: {} socat: {} software-properties-common: *debian_family_base @@ -105,4 +111,7 @@ os_pkgs: unzip: {} xfsprogs: {} -python_pkgs: {} +python_pkgs: + kubernetes.core: + groups: + - kube_control_plane From e1f2ee1c0ded41491832e25f6d9fc9d9aa131a31 Mon Sep 17 00:00:00 2001 From: Max Gautier Date: Fri, 3 May 2024 16:18:52 +0200 Subject: [PATCH 7/8] Add workaround for centos7 python_pkgs installations CentOS 7 ships with python2 and does 
not handle well the infrastructure introduced in 625ef33f5 (Add install infra for python_pkgs, 2024-05-02). Include a workaround for this. This is kept as a separate commit to be easily revertible, as CentOS 7 EOL is 30/06/2024 (= in less than two months) --- .../kubernetes/preinstall/tasks/0070-system-packages.yml | 4 +++- roles/kubernetes/preinstall/vars/main.yml | 9 +++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml index 99ce2c98828..bec786423c4 100644 --- a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml +++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml @@ -105,6 +105,8 @@ vars: full_query: "[? value | ( {{ filters_groups }} ) ].key" pkgs: "{{ python_pkgs }}" + python: "{{ (ansible_facts.python.version.major != 3) | ternary('/usr/bin/python3', ansible_facts.python.executable) }}" + # Workaround for CentOS7 when: - to_install | length != 0 ansible.builtin.pip: @@ -112,4 +114,4 @@ extra_args: "--only-binary :all: --no-deps --require-hashes" virtualenv_site_packages: true virtualenv: "{{ kubespray_virtualenvs_base }}/{{ item }}" - virtualenv_command: "{{ ansible_facts.python.executable }} -m venv" + virtualenv_command: "{{ python }} -m venv" diff --git a/roles/kubernetes/preinstall/vars/main.yml b/roles/kubernetes/preinstall/vars/main.yml index 3ef0ecfc2e0..95275598e2e 100644 --- a/roles/kubernetes/preinstall/vars/main.yml +++ b/roles/kubernetes/preinstall/vars/main.yml @@ -88,6 +88,14 @@ os_pkgs: nss: *redhat_family openssl: {} python-apt: *deb_10 + python-setuptools: &centos_python os: distributions: CentOS: major_versions: - "7" groups: - kube_control_plane # TODO: not for debian 10 python3-apt: *debian_family_base python3-libselinux: @@ -98,6 +106,7 @@ - "8" - "9" CentOS: *major_redhat_like + python3-pip: *centos_python python3-venv: os: families: From 
6913611c1e2f9f08d816c6599c70ae46bffa02b2 Mon Sep 17 00:00:00 2001 From: Max Gautier Date: Thu, 7 Dec 2023 16:43:05 +0100 Subject: [PATCH 8/8] Switch to kubernetes.core.k8s for DNS manifests Besides the problem with client-side apply explained in the previous commit, we reduce ansible overhead by using the template feature of kubernetes.core.k8s, which lets us supply a list of templates directly, which are applied all at once. This considerably reduces the ansible overhead (task scheduling). --- roles/kubernetes-apps/ansible/tasks/main.yml | 51 +++++-------------- .../ansible/tasks/nodelocaldns.yml | 33 ------------ roles/kubernetes-apps/ansible/vars/main.yml | 24 +++++++++ 3 files changed, 37 insertions(+), 71 deletions(-) delete mode 100644 roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index e3d82f10699..9cffd48f701 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -11,48 +11,23 @@ delay: 1 when: inventory_hostname == groups['kube_control_plane'][0] -- name: Kubernetes Apps | CoreDNS - import_tasks: "coredns.yml" - when: - - dns_mode in ['coredns', 'coredns_dual'] - - inventory_hostname == groups['kube_control_plane'][0] - tags: - - coredns - -- name: Kubernetes Apps | nodelocalDNS - import_tasks: "nodelocaldns.yml" - when: - - enable_nodelocaldns - - inventory_hostname == groups['kube_control_plane'] | first - tags: - - nodelocaldns - -- name: Kubernetes Apps | Start Resources - kube: - name: "{{ item.item.name }}" - namespace: "kube-system" - kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.item.type }}" - filename: "{{ kube_config_dir }}/{{ item.item.file }}" - state: "latest" - with_items: - - "{{ coredns_manifests.results | default({}) }}" - - "{{ coredns_secondary_manifests.results | default({}) }}" - - "{{ nodelocaldns_manifests.results | default({}) }}" - - "{{ 
nodelocaldns_second_manifests.results | default({}) }}" - when: - - dns_mode != 'none' - - inventory_hostname == groups['kube_control_plane'][0] - - not item is skipped - register: resource_result - until: resource_result is succeeded - retries: 4 +- name: Kubernetes Apps | DNS + vars: + ansible_python_interpreter: "{{ kubespray_virtualenvs_base }}/kubernetes.core/bin/python" + kubernetes.core.k8s: + state: present + apply: true + server_side_apply: + field_manager: kubespray + force_conflicts: true + template: "{{ all_manifests_lists | flatten }}" + register: result + until: result is succeeded + retries: 10 delay: 5 tags: - coredns - nodelocaldns - loop_control: - label: "{{ item.item.file }}" - name: Kubernetes Apps | Etcd metrics endpoints import_tasks: etcd_metrics.yml diff --git a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml deleted file mode 100644 index b6de7cc4a5d..00000000000 --- a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -- name: Kubernetes Apps | Lay Down nodelocaldns Template - template: - src: "{{ item.file }}.j2" - dest: "{{ kube_config_dir }}/{{ item.file }}" - mode: 0644 - with_items: - - { name: nodelocaldns, file: nodelocaldns-config.yml, type: configmap } - - { name: nodelocaldns, file: nodelocaldns-sa.yml, type: sa } - - { name: nodelocaldns, file: nodelocaldns-daemonset.yml, type: daemonset } - register: nodelocaldns_manifests - when: - - enable_nodelocaldns - - inventory_hostname == groups['kube_control_plane'] | first - tags: - - nodelocaldns - - coredns - -- name: Kubernetes Apps | Lay Down nodelocaldns-secondary Template - template: - src: "{{ item.file }}.j2" - dest: "{{ kube_config_dir }}/{{ item.file }}" - mode: 0644 - with_items: - - { name: nodelocaldns, file: nodelocaldns-second-daemonset.yml, type: daemonset } - register: nodelocaldns_second_manifests - when: - - enable_nodelocaldns - - enable_nodelocaldns_secondary - - 
inventory_hostname == groups['kube_control_plane'] | first - tags: - - nodelocaldns - - coredns diff --git a/roles/kubernetes-apps/ansible/vars/main.yml b/roles/kubernetes-apps/ansible/vars/main.yml index 47b9681d064..3dd72cc9996 100644 --- a/roles/kubernetes-apps/ansible/vars/main.yml +++ b/roles/kubernetes-apps/ansible/vars/main.yml @@ -12,3 +12,27 @@ primary_dns_by_mode: nodelocaldns_forward_target: "{{ ([primary_dns_by_mode[dns_mode]] + (dns_mode == 'coredns_dual') | ternary([skydns_server_secondary], [])) | join(' ') }}" nodelocaldns_upstream_forward_target: "{{ upstream_dns_servers | d([]) | join(' ') | d('/etc/resolv.conf', true) }}" + +coredns_manifests: +- coredns-clusterrole.yml.j2 +- coredns-clusterrolebinding.yml.j2 +- coredns-config.yml.j2 +- coredns-deployment.yml.j2 +- coredns-sa.yml.j2 +- coredns-svc.yml.j2 +- dns-autoscaler.yml.j2 +- dns-autoscaler-clusterrole.yml.j2 +- dns-autoscaler-clusterrolebinding.yml.j2 +- dns-autoscaler-sa.yml.j2 +- "{{ coredns_pod_disruption_budget | ternary('coredns-poddisruptionbudget.yml.j2', '') }}" + +nodelocaldns_manifests: +- nodelocaldns-config.yml.j2 +- nodelocaldns-sa.yml.j2 +- nodelocaldns-daemonset.yml.j2 + +all_apps: +- coredns +all_manifests_lists: +- "{{ (dns_mode in ['coredns', 'coredns_dual'] and ansible_run_tags | intersect(['all', 'coredns']) | length > 0 and 'coredns' not in ansible_skip_tags) | ternary(coredns_manifests | select, []) }}" +- "{{ (enable_nodelocaldns and ansible_run_tags | intersect(['all', 'coredns', 'nodelocaldns']) | length > 0 and ansible_skip_tags | intersect(['coredns', 'nodelocaldns']) | length == 0) | ternary(nodelocaldns_manifests, []) }}"