From 5fb2e5f6934899bef0e743e58d1aa52e3ed4c1ef Mon Sep 17 00:00:00 2001 From: ilia1243 <8808144+ilia1243@users.noreply.github.com> Date: Wed, 25 Jan 2023 13:06:56 +0300 Subject: [PATCH] MANOPD-83734 Actualize and ensure mandatory thirdparty packages (#338) * Audit installation refactoring * Install audit on both rhel and debian * Mandatory packages implementation * Added and changed documentation. * Auto-update license header * Minor fixes * Remove mandatory packages from packages.include of CI inventories. * Fix verify_shell where target nodes are not explicitly provided * Add unit tests * Auto-update license header * Clarify TODO for zip thirdparties Co-authored-by: ilia1243 --- ci/default_config.yaml | 7 - ci/extended_config.yaml | 8 +- documentation/Installation.md | 144 ++++++++--- documentation/Kubecheck.md | 127 ++++++--- documentation/Maintenance.md | 2 +- kubemarine/apt.py | 14 +- kubemarine/audit.py | 136 ++++------ kubemarine/core/cluster.py | 39 +-- kubemarine/core/defaults.py | 1 + kubemarine/core/executor.py | 42 ++- kubemarine/core/group.py | 10 +- kubemarine/packages.py | 234 +++++++++++------ kubemarine/patches/__init__.py | 2 + .../patches/p1_mandatory_packages_off.py | 50 ++++ kubemarine/plugins/__init__.py | 10 +- kubemarine/procedures/add_node.py | 2 +- kubemarine/procedures/check_iaas.py | 51 +--- kubemarine/procedures/check_paas.py | 62 +++-- kubemarine/procedures/do.py | 2 +- kubemarine/procedures/install.py | 28 +- kubemarine/procedures/upgrade.py | 4 +- .../resources/configurations/defaults.yaml | 58 ++--- .../resources/configurations/globals.yaml | 57 +++++ .../definitions/services/packages.json | 60 +++-- .../services/packages/associations.json | 71 +++-- kubemarine/resources/schemas/migrate_cri.json | 2 +- kubemarine/resources/schemas/upgrade.json | 9 +- kubemarine/system.py | 2 +- kubemarine/thirdparties.py | 51 +++- kubemarine/yum.py | 17 +- test/unit/core/test_executor.py | 50 ++++ test/unit/test_audit.py | 49 ++-- test/unit/test_haproxy.py | 10 +- test/unit/test_install.py | 104 ++++++++ test/unit/test_keepalived.py | 8 +- test/unit/test_packages.py | 242 +++++++++++++++--- 36 files changed, 1225 insertions(+), 540 deletions(-) create mode 100644 kubemarine/patches/p1_mandatory_packages_off.py create mode 100644 test/unit/test_install.py diff --git a/ci/default_config.yaml b/ci/default_config.yaml index e47b6c6a0..f9f224e53 100644 --- a/ci/default_config.yaml +++ b/ci/default_config.yaml @@ -9,10 +9,3 @@ nodes: roles: ["control-plane", "worker"] cluster_name: "test-local-k8s.com" - -services: - packages: - install: - - conntrack - - kmod - - curl diff --git a/ci/extended_config.yaml b/ci/extended_config.yaml index a370513a9..50e1ba31c 100644 --- a/ci/extended_config.yaml +++ b/ci/extended_config.yaml @@ -18,15 +18,9 @@ cluster_name: "test-local-k8s.com" services: packages: install: - - conntrack - - kmod - ethtool - ebtables - socat - - curl - - openssl - - unzip - - policycoreutils-python-utils plugins: kubernetes-dashboard: @@ -54,4 +48,4 @@ rbac: namespace: kube-system admission: pss pss: - pod-security: enabled \ No newline at end of file + pod-security: enabled diff --git a/documentation/Installation.md b/documentation/Installation.md index 9ec21e6f4..53f706a6c 100644 --- a/documentation/Installation.md +++ b/documentation/Installation.md @@ -210,20 +210,10 @@ If you have other solution, remove or switch off the IP firewall before the inst **Preinstalled software** -* Mandatory: - * curl - * OpenSSL library - * kmod - * semanage - * conntrack - * audit 
-  * unzip. By default it is not required. Install if you intend to unzip third-party files with **.zip** extension.
-* Recommended - Installation of the below packages is highly recommended; however, Kubernetes is able to work without them, but may show warnings:
+* Installation of the below packages is highly recommended; however, Kubernetes is able to work without them, but may show warnings:
   * ethtool
   * ebtables
   * socat
-  * policycoreutils-python
 
 **Warning**: You have to specify package names in "RPM format" if it is possible for your OS. For example, specify `conntrack-tools` instead of `conntrack`.
 
@@ -1655,7 +1645,42 @@ services:
 
 *OS specific*: Yes, the necessary package manager is selected for different OS families.
 
-By default, the installer does not install any packages from the package manager. However, if you need it, you can manage the packages directly during installation.
+###### mandatory
+
+By default, the installer installs a predefined list of mandatory packages from the package manager. The list of mandatory packages is the following:
+* conntrack
+* iptables
+* curl
+* openssl
+* unzip
+* semanage
+* kmod
+
+Exact package names are detected automatically depending on the OS family of the cluster.
+For more information, see [associations](#associations).
+
+**Warning**: Make sure to have all the mandatory packages available in the repositories.
+You can configure the necessary repositories in the [package_manager](#package_manager) section of the inventory.
+
+Most of the mandatory packages are installed on all nodes with the following exceptions:
+* conntrack and iptables are installed only on control-plane and worker nodes.
+* unzip is installed only on nodes that require thirdparties packed in .zip archives.
+  For more information, see the **unpack** option in [thirdparties](#thirdparties).
+* semanage is installed only on RHEL nodes.
+
+If you need to turn some mandatory packages off for some reason,
+this can be done in the `services.packages.mandatory` section. For example:
+
+```yaml
+services:
+  packages:
+    mandatory:
+      conntrack: false
+```
+
+###### custom
+
+If you need other custom packages, you can manage them directly during installation.
 
 You can choose any one action from the following types of actions:
 * remove
 * install
 * upgrade
@@ -1679,10 +1704,6 @@ services:
     - ethtool
     - ebtables
     - socat
-    - curl
-    - openssl
-    - unzip
-    - policycoreutils-python
 ```
 
 The following is an example to install, upgrade, and remove packages:
 
 ```yaml
 services:
   packages:
     remove:
-      - curl
+      - socat
     install:
-      - unzip
-      - policycoreutils-python
+      - ebtables
     upgrade:
-      - openssl
+      - ethtool
 ```
 
 The format of package definition is the same as in the package manager. You can specify the exact version of a package to install:
 
 ```yaml
 services:
   packages:
     install:
-      - openssl-1.0
-      - unzip-1.1
+      - ebtables-2.0.*
+      - ethtool-4.*
 ```
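 
 The `mandatory` and `custom` mechanisms can be combined. As a sketch (the package and pin below are illustrative, not a recommendation): to control the version of a mandatory package yourself, turn it off in `mandatory` and pin it through the custom `install` list instead. This is essentially what the `mandatory_packages_off` patch in this change does for pre-existing clusters, which installed such packages through the `install` section.
 
 ```yaml
 services:
   packages:
     mandatory:
       unzip: false    # KubeMarine no longer installs or caches unzip
     install:
       - unzip-6.0*    # illustrative pin: unzip is now managed as a custom package
 ```
 
 To update all packages, you can use an asterisk.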
For example: @@ -1853,6 +1873,41 @@ The following associations are used by default: config_location /etc/audit/rules.d/predefined.rules + + conntrack + package_name + conntrack-tools + + + iptables + package_name + iptables + + + openssl + package_name + openssl + + + curl + package_name + curl + + + unzip + package_name + unzip + + + kmod + package_name + kmod + + + semanage + package_name + policycoreutils-python + @@ -1949,6 +2004,36 @@ The following associations are used by default: config_location /etc/audit/rules.d/predefined.rules + + conntrack + package_name + conntrack + + + iptables + package_name + iptables + + + openssl + package_name + openssl + + + curl + package_name + curl + + + unzip + package_name + unzip + + + kmod + package_name + kmod + **Notes**: @@ -2374,7 +2459,7 @@ services: *Can restart service*: Always yes, `auditd`. -*OS specific*: Yes, `prepare.system.audit.install` task is performed only on the Debian OS family. +*OS specific*: No ```yaml services: @@ -4655,7 +4740,6 @@ Application of the list merge strategy is allowed in the following sections: * `services.packages.install` * `services.packages.upgrade` * `services.packages.remove` -* `services.packages.package_manager.repositories` * `plugins.nginx-ingress-controller.ports` * `plugins.kubernetes-dashboard.ingress.spec.tls` * `plugins.kubernetes-dashboard.ingress.spec.rules` @@ -4809,7 +4893,7 @@ The following is the installation tasks tree: * **modprobe** - Configures Linux Kernel modules. For more information about parameters for this task, see [modprobe](#modprobe). * **sysctl** - Configures Linux Kernel parameters. For more information about parameters for this task, see [sysctl](#sysctl). * **audit** - * **install** - Installs auditd daemon on Ubuntu/Debian nodes. + * **install** - Installs auditd daemon on nodes. * **configure_daemon** - Configures Linux audit rules. For more information about parameters for this task, see [audit-daemon](#audit-daemon). * **configure_policy** - Configures Kubernetes audit rules. For more information about parameters for this task, see [audit-Kubernetes Policy](#audit-Kubernetes-Policy) @@ -4996,13 +5080,12 @@ services: - containerd.io-1.4.6* service_name: 'docker' config_location: '/etc/docker/daemon.json' + conntrack: + package_name: conntrack-tools install: - - conntrack - ethtool - ebtables - socat - - unzip - - policycoreutils-python-utils ``` The above configuration is converted to the following finalized configuration, provided that the cluster is based on RHEL nodes: @@ -5020,14 +5103,13 @@ services: - containerd.io-1.4.6-3.1.el7.x86_64 service_name: 'docker' config_location: '/etc/docker/daemon.json' + conntrack: + package_name: conntrack-tools-1.4.4-7.el7.x86_64 install: include: - - conntrack - ethtool-4.8-10.el7.x86_64 - ebtables-2.0.10-16.el7.x86_64 - socat-1.7.3.2-2.el7.x86_64 - - unzip-6.0-21.el7.x86_64 - - policycoreutils-python-utils ``` **Note**: Some of the packages are impossible to be detected in the system, therefore such packages remain unchanged. diff --git a/documentation/Kubecheck.md b/documentation/Kubecheck.md index 0d1fc07f8..cffead3b0 100644 --- a/documentation/Kubecheck.md +++ b/documentation/Kubecheck.md @@ -14,13 +14,13 @@ This section provides information about the Kubecheck functionality. 
- [005 Workers Amount](#005-workers-amount) - [005 Total Nodes Amount](#005-total-nodes-amount) - [006 VCPUs Amount](#006-vcpus-amount) - - [006 VCPUs Amount - Balancers](#006-vcpus-amount-balancers) - - [006 VCPUs Amount - Control-planes](#006-vcpus-amount-control-planes) - - [006 VCPUs Amount - Workers](#006-vcpus-amount-workers) + - [006 VCPUs Amount - Balancers](#006-vcpus-amount---balancers) + - [006 VCPUs Amount - Control-planes](#006-vcpus-amount---control-planes) + - [006 VCPUs Amount - Workers](#006-vcpus-amount---workers) - [007 RAM Amount](#007-ram-amount) - - [007 RAM Amount - Balancers](#007-ram-amount-balancers) - - [007 RAM Amount - Control-planes](#007-ram-amount-control-planes) - - [007 RAM Amount - Workers](#007-ram-amount-workers) + - [007 RAM Amount - Balancers](#007-ram-amount---balancers) + - [007 RAM Amount - Control-planes](#007-ram-amount---control-planes) + - [007 RAM Amount - Workers](#007-ram-amount---workers) - [008 Distributive](#008-distributive) - [009 PodSubnet](#009-podsubnet) - [010 ServiceSubnet](#010-servicesubnet) @@ -34,22 +34,26 @@ This section provides information about the Kubecheck functionality. - [201 Keepalived Status](#201-keepalived-status) - [201 Container Runtime Status](#201-container-runtime-status) - [201 Kubelet Status](#201-kubelet-status) - - [202 Kubelet Version](#202-kubelet-version) - - [203 Recommended packages versions](#203-recommended-packages-version) - - [204 Docker Version](#204-cri-versions) - - [204 HAproxy Version](#204-haproxy-version) - - [204 Keepalived Version](#204-keepalived-version) - - [205 Generic Packages Version](#205-generic-packages-version) - - [206 Pods Condition](#206-pods-condition) - - [207 Dashboard Availability](#207-dashboard-availability) - - [208 Nodes Existence](#208-nodes-existence) - - [209 Nodes Roles](#209-nodes-roles) - - [210 Nodes Condition](#210-nodes-condition) - - [210 Nodes Condition - NetworkUnavailable](#210-nodes-condition-networkunavailable) - - [210 Nodes Condition - MemoryPressure](#210-nodes-condition-memorypressure) - - [210 Nodes Condition - DiskPressure](#210-nodes-condition-diskpressure) - - [210 Nodes Condition - PIDPressure](#210-nodes-condition-pidpressure) - - [210 Nodes Condition - Ready](#210-nodes-condition-ready) + - [202 Nodes pid_max](#202-nodes-pid_max) + - [203 Kubelet Version](#203-kubelet-version) + - [204 Recommended packages versions](#204-recommended-packages-version) + - [205 System packages versions](#205-system-packages-version) + - [205 CRI Versions](#205-cri-versions) + - [205 HAproxy Version](#205-haproxy-version) + - [205 Keepalived Version](#205-keepalived-version) + - [205 Audit Version](#205-audit-version) + - [205 Mandatory Package Versions](#205-mandatory-package-versions) + - [206 Generic Packages Version](#206-generic-packages-version) + - [207 Pods Condition](#207-pods-condition) + - [208 Dashboard Availability](#208-dashboard-availability) + - [209 Nodes Existence](#209-nodes-existence) + - [210 Nodes Roles](#210-nodes-roles) + - [211 Nodes Condition](#211-nodes-condition) + - [211 Nodes Condition - NetworkUnavailable](#211-nodes-condition---networkunavailable) + - [211 Nodes Condition - MemoryPressure](#211-nodes-condition---memorypressure) + - [211 Nodes Condition - DiskPressure](#211-nodes-condition---diskpressure) + - [211 Nodes Condition - PIDPressure](#211-nodes-condition---pidpressure) + - [211 Nodes Condition - Ready](#211-nodes-condition---ready) - [213 Selinux security policy](#213-selinux-security-policy) - [214 Selinux 
configuration](#214-selinux-configuration) - [215 Firewalld status](#215-firewalld-status) @@ -324,6 +328,21 @@ The PAAS procedure verifies the platform solution. For example, it checks the he The task tree is as follows: * services + * security + * selinux + * status + * config + * apparmor + * status + * config + * firewalld + * status + * system + * time + * swap + * status + * modprobe + * rules * haproxy * status * keepalived @@ -332,8 +351,24 @@ The task tree is as follows: * status * kubelet * status + * configuration + * version + * packages + * system + * recommended_versions + * cri_version + * haproxy_version + * keepalived_version + * audit_version + * mandatory_versions + * generic + * version +* thirdparties + * hashes * kubernetes - * version + * pods + * plugins + * dashboard * nodes * existence * roles @@ -343,6 +378,18 @@ The task tree is as follows: * disk * pid * ready + * admission +* etcd + * health_status +* control_plane + * configuration_status + * health_status +* default_services + * configuration_status + * health_status +* calico + * config_check +* geo_check ##### 201 Service Status @@ -386,31 +433,47 @@ This test checks the Kubelet version on all hosts in a cluster. ##### 204 Recommended Packages Version -*Task*: `packages.system.recommened_versions` +*Task*: `services.packages.system.recommended_versions` This test checks that system package versions in the inventory are recommended. -##### 205 CRI Versions +##### 205 System Packages Version + +Tests of this type check that the system packages are installed and have equal versions. + +###### 205 CRI Versions -*Task*: `packages.system.cri_version` +*Task*: `services.packages.system.cri_version` This test checks that the configured CRI package is installed on all nodes and has an equal version. -##### 205 HAproxy Version +###### 205 HAproxy Version -*Task*: `packages.system.haproxy` +*Task*: `services.packages.system.haproxy_version` This test checks that the configured HAproxy package is installed on all nodes and has an equal version. -##### 205 Keepalived Version +###### 205 Keepalived Version -*Task*: `packages.system.keepalived` +*Task*: `services.packages.system.keepalived_version` This test checks that the configured Keepalived package is installed on all nodes and has an equal version. +###### 205 Audit Version + +*Task*: `services.packages.system.audit_version` + +This test checks that the configured Audit package is installed on all nodes and has an equal version. + +###### 205 Mandatory Package Versions + +*Task*: `services.packages.system.mandatory_versions` + +This test checks that the configured mandatory packages are installed on all nodes and have equal versions. + ##### 206 Generic Packages Version -*Task*: `packages.generic.versions` +*Task*: `services.packages.generic.version` This test checks that the configured generic packages are installed on all nodes and have equal versions. @@ -569,7 +632,7 @@ The test checks status of Pod Security Admissions, default PSS(Pod Security Stan ###### 226 Geo connectivity status -*Task*: `geo_monitor` +*Task*: `geo_check` The task checks status of DNS resolving, pod-to-service and pod-to-pod connectivity between cluster in geographically distributed schemas. 
This task works only if procedure config file is provided with information about `paas-geo-monitor`, diff --git a/documentation/Maintenance.md b/documentation/Maintenance.md index f3e8ba827..71f39b665 100644 --- a/documentation/Maintenance.md +++ b/documentation/Maintenance.md @@ -228,7 +228,7 @@ This task is executed to restore the required CoreDNS configuration. #### Packages Upgrade Section and Task -This inventory section contains the configuration to upgrade custom and system packages, such as docker, containerd, haproxy, and keepalived. The system packages are upgraded by default, if necessary. You can influence the system packages' upgrade and specify custom packages for the upgrade/installation/removal using the `packages` section as follows: +This inventory section contains the configuration to upgrade custom and system packages, such as docker, containerd, and podman. The system packages are upgraded by default, if necessary. You can influence the system packages' upgrade and specify custom packages for the upgrade/installation/removal using the `packages` section as follows: ```yaml v1.18.8: diff --git a/kubemarine/apt.py b/kubemarine/apt.py index 358074c70..6a7e7c0a7 100644 --- a/kubemarine/apt.py +++ b/kubemarine/apt.py @@ -55,10 +55,7 @@ def clean(group, **kwargs) -> NodeGroupResult: return group.sudo(DEBIAN_HEADERS + "apt clean", **kwargs) -def install(group, include=None, exclude=None, **kwargs) -> NodeGroupResult: - if include is None: - raise Exception('You must specify included packages to install') - +def get_install_cmd(include: str or list, exclude=None) -> str: if isinstance(include, list): include = ' '.join(include) command = DEBIAN_HEADERS + 'apt update && ' + \ @@ -69,6 +66,15 @@ def install(group, include=None, exclude=None, **kwargs) -> NodeGroupResult: exclude = ','.join(exclude) command += ' --exclude=%s' % exclude + return command + + +def install(group, include=None, exclude=None, **kwargs) -> NodeGroupResult: + if include is None: + raise Exception('You must specify included packages to install') + + command = get_install_cmd(include, exclude) + return group.sudo(command, **kwargs) # apt fails to install (downgrade) package if it is already present and has higher version, # thus we do not need additional checks here (in contrast to yum) diff --git a/kubemarine/audit.py b/kubemarine/audit.py index cadc18409..dbf2d4dc1 100644 --- a/kubemarine/audit.py +++ b/kubemarine/audit.py @@ -21,124 +21,90 @@ from kubemarine import system, packages from kubemarine.core import utils -from kubemarine.core.annotations import restrict_multi_os_group +from kubemarine.core.cluster import KubernetesCluster from kubemarine.core.executor import RemoteExecutor from kubemarine.core.group import NodeGroup, NodeGroupResult -def is_audit_rules_defined(inventory) -> bool: - """ - Checks for the presence of the specified audit rules in the inventory - :param inventory: Cluster inventory, where the rules will be checked - :return: Boolean - """ - rules = inventory['services'].get('audit', {}).get('rules') - return rules is not None +def verify_inventory(inventory: dict, cluster: KubernetesCluster) -> dict: + for host in cluster.nodes['all'].get_final_nodes().get_hosts(): + package_name = cluster.get_package_association_for_node(host, 'audit', 'package_name') + if isinstance(package_name, str): + package_name = [package_name] + + if len(package_name) != 1: + os_family = cluster.get_os_family_for_node(host) + raise Exception(f'Audit has multiple associated packages {package_name} for OS 
{os_family!r} ' + f'that is currently not supported') + + return inventory -@restrict_multi_os_group -def install(group: NodeGroup, enable_service: bool = True, force: bool = False) -> NodeGroupResult or None: +def install(group: NodeGroup) -> str or None: """ Automatically installs and enables the audit service for the specified nodes :param group: Nodes group on which audit installation should be performed - :param enable_service: Flag, automatically enables the service after installation - :param force: A flag that causes a forced installation even on centos nodes and nodes where the audit is already - installed :return: String with installation output from nodes or None, when audit installation was skipped """ cluster = group.cluster log = cluster.log - if not is_audit_rules_defined(cluster.inventory): - log.debug('Skipped - no audit rules in inventory') - return - - # This method handles cluster with multiple os, exceptions should be suppressed - if not force and group.get_nodes_os() in ['rhel', 'rhel8']: - log.debug('Auditd installation is not required on RHEL nodes') - return + log.verbose('Searching for already installed auditd package...') - install_group = group + # Reduce nodes amount for installation + hosts_to_packages = packages.get_association_hosts_to_packages(group, cluster.inventory, 'audit') - if not force: - log.verbose('Searching for already installed auditd package...') - debian_group = group.get_subgroup_with_os('debian') - debian_package_name = cluster.get_package_association_str_for_group(debian_group, 'audit', 'package_name') - if isinstance(debian_package_name, list): - raise Exception(f'Audit can not be installed, because nodes already contains different package versions: ' - f'{str(debian_package_name)}') - audit_installed_results = packages.detect_installed_package_version(debian_group, debian_package_name) - log.verbose(audit_installed_results) + not_installed_hosts = [] + audit_installed_results = packages.detect_installed_packages_version_hosts(cluster, hosts_to_packages) + for detected_audit_versions in audit_installed_results.values(): + for detected_version, hosts in detected_audit_versions.items(): + log.verbose(f'{detected_version}: {hosts}') + if 'not installed' in detected_version: + not_installed_hosts.extend(hosts) - # Reduce nodes amount for installation - install_group = audit_installed_results.get_nodes_group_where_value_in_stderr("no packages found matching") - - if install_group.nodes_amount() == 0: - log.debug('Auditd is already installed on all nodes') - return - else: - log.debug('Auditd package is not installed, installing...') - - package_name = cluster.get_package_association_str_for_group(install_group, 'audit', 'package_name') + if not not_installed_hosts: + log.debug('Auditd is already installed on all nodes') + return + else: + log.debug(f'Auditd package is not installed on {not_installed_hosts}, installing...') with RemoteExecutor(cluster) as exe: - packages.install(install_group, include=package_name) - if enable_service: - enable(install_group) - - return exe.get_last_results_str() - + for host in not_installed_hosts: + the_node = cluster.make_group([host]) -@restrict_multi_os_group -def enable(group: NodeGroup, now: bool = True) -> NodeGroupResult: - """ - Enables and optionally starts the audit service for the specified nodes - :param group: Nodes group, where audit service should be enabled - :param now: Flag indicating that the audit service should be started immediately - :return: NodeGroupResult of enabling output from nodes 
- """ - cluster = group.cluster - - service_name = cluster.get_package_association_str_for_group(group, 'audit', 'service_name') - return system.enable_service(group, name=service_name, now=now) + package_name = cluster.get_package_association_for_node(host, 'audit', 'package_name') + packages.install(the_node, include=package_name) + service_name = cluster.get_package_association_for_node(host, 'audit', 'service_name') + system.enable_service(the_node, name=service_name) -@restrict_multi_os_group -def restart(group: NodeGroup) -> NodeGroupResult: - """ - Restarts the audit service for the specified nodes - :param group: Nodes group, where audit service should be restarted - :return: Service restart NodeGroupResult - """ - cluster = group.cluster - - service_name = cluster.get_package_association_str_for_group(group, 'audit', 'service_name') - return group.sudo(f'service {service_name} restart') + return exe.get_last_results_str() -@restrict_multi_os_group -def apply_audit_rules(group: NodeGroup, now: bool = True) -> NodeGroupResult or None: +def apply_audit_rules(group: NodeGroup) -> NodeGroupResult: """ Generates and applies audit rules to the group :param group: Nodes group, where audit service should be configured - :param now: Flag indicating that the audit service should be restarted immediately :return: Service restart result or nothing if audit rules are non exists, or restart is not required """ cluster = group.cluster log = cluster.log - if not is_audit_rules_defined(group.cluster.inventory): - log.debug('Skipped - no audit rules in inventory') - return - log.debug('Applying audit rules...') - rules_content = " \n".join(group.cluster.inventory['services']['audit']['rules']) + rules_content = " \n".join(cluster.inventory['services']['audit']['rules']) + utils.dump_file(cluster, rules_content, 'audit.rules') + + restart_tokens = [] + with RemoteExecutor(cluster) as exe: + for node in group.get_ordered_members_list(provide_node_configs=True): + the_node: NodeGroup = node['connection'] + host: str = node['connect_to'] - rules_config_location = cluster.get_package_association_str_for_group(group, 'audit', 'config_location') + rules_config_location = cluster.get_package_association_for_node(host, 'audit', 'config_location') + the_node.put(io.StringIO(rules_content), rules_config_location, + sudo=True, backup=True) - utils.dump_file(group.cluster, rules_content, 'audit.rules') - group.put(io.StringIO(rules_content), rules_config_location, - sudo=True, backup=True) + service_name = cluster.get_package_association_for_node(host, 'audit', 'service_name') + restart_tokens.append(the_node.sudo(f'service {service_name} restart')) - if now: - return restart(group) + return exe.get_merged_nodegroup_results(restart_tokens) diff --git a/kubemarine/core/cluster.py b/kubemarine/core/cluster.py index 4a2051de5..ac072fe89 100755 --- a/kubemarine/core/cluster.py +++ b/kubemarine/core/cluster.py @@ -119,7 +119,7 @@ def make_group_from_nodes(self, node_names: List[str]) -> NodeGroup: return self.make_group(ips) def create_group_from_groups_nodes_names(self, groups_names: List[str], nodes_names: List[str]) -> NodeGroup: - common_group = None + common_group = self.make_group([]) if nodes_names: common_group = self.make_group_from_nodes(nodes_names) @@ -131,10 +131,7 @@ def create_group_from_groups_nodes_names(self, groups_names: List[str], nodes_na self.log.verbose('Group \'%s\' is requested for usage, but this group is not exists.' 
% group) continue - if common_group is None: - common_group = self.nodes[group] - else: - common_group = common_group.include_group(self.nodes[group]) + common_group = common_group.include_group(self.nodes[group]) return common_group @@ -318,38 +315,6 @@ def get_package_association_for_node(self, host: str, package: str, association_ os_family = self.get_os_family_for_node(host) return self._get_package_associations_for_os(os_family, package, association_key) - def get_package_association_for_group(self, group: NodeGroup, package: str, association_key: str) -> dict: - """ - Returns the specified association dict for the specified package from inventory for entire NodeGroup. - - :param group: NodeGroup for which required to find the association - :param package: The package name to get the association for - :param association_key: Association key to get - :return: Association values for every host in group, e.g. { host -> value } - """ - results = {} - for node in group.get_ordered_members_list(provide_node_configs=True): - association_value = self.get_package_association_for_node(node['connect_to'], package, association_key) - results[node['connect_to']] = association_value - return results - - def get_package_association_str_for_group(self, group: NodeGroup, - package: str, association_key: str) -> str or list: - """ - Returns the specified association string or list for the specified package from inventory for entire NodeGroup. - If association value is different between some nodes, an exception will be thrown. - - :param group: NodeGroup for which required to find the association - :param package: The package name to get the association for - :param association_key: Association key to get - :return: Association string or list value - """ - results = self.get_package_association_for_group(group, package, association_key) - results_values = list(set(results.values())) - if len(results_values) == 1: - return results_values[0] - raise Exception(f'Too many values returned for package associations str "{association_key}" for package "{package}"') - def make_finalized_inventory(self): from kubemarine.core import defaults from kubemarine.procedures import remove_node diff --git a/kubemarine/core/defaults.py b/kubemarine/core/defaults.py index 8cc532658..9063a58f9 100755 --- a/kubemarine/core/defaults.py +++ b/kubemarine/core/defaults.py @@ -61,6 +61,7 @@ "kubemarine.system.verify_inventory", "kubemarine.system.enrich_etc_hosts", "kubemarine.packages.enrich_inventory_include_all", + "kubemarine.audit.verify_inventory", "kubemarine.plugins.enrich_inventory", "kubemarine.plugins.verify_inventory", "kubemarine.coredns.enrich_add_hosts_config", diff --git a/kubemarine/core/executor.py b/kubemarine/core/executor.py index 861b57af3..7e9b22d04 100644 --- a/kubemarine/core/executor.py +++ b/kubemarine/core/executor.py @@ -14,7 +14,7 @@ import random import time -from typing import Tuple, List, Dict, Callable, Any +from typing import Tuple, List, Dict, Callable, Any, Optional from contextvars import Token, ContextVar import fabric @@ -36,11 +36,15 @@ def __init__(self, cluster, enforce_children=False, timeout=None): self.cluster = cluster - self.warn = warn + # TODO revise necessity of warn option + self.warn = False self.lazy = lazy self.parallel = parallel - self.ignore_failed = ignore_failed - self.enforce_children = enforce_children + # TODO support ignore_failed option. + # Probably it should be chosen automatically depending on warn=? of commands kwargs (not of the same executor option). 
+ self.ignore_failed = False + # TODO revise this option + self.enforce_children = False self.timeout = timeout self.connections_queue: Dict[Connection, List[Tuple]] = {} self._last_token = -1 @@ -99,13 +103,13 @@ def reparse_results(self, results, batch): action, callbacks, tokens = batch_no_cnx[host] if isinstance(result, fabric.runners.Result) and executor.command_separator in result.stdout and executor.command_separator in result.stderr: - stderrs = result.stderr.strip().split(executor.command_separator) - raw_stdouts = result.stdout.strip().split(executor.command_separator) + stderrs = result.stderr.split(executor.command_separator + '\n') + raw_stdouts = result.stdout.split(executor.command_separator + '\n') stdouts = [] exit_codes = [] i = 0 while i < len(raw_stdouts): - stdouts.append(raw_stdouts[i].strip()) + stdouts.append(raw_stdouts[i]) if i + 1 < len(raw_stdouts): exit_codes.append(int(raw_stdouts[i + 1].strip())) i += 2 @@ -177,6 +181,8 @@ def _get_callables(self): return batches def queue(self, target, action: Tuple, callback: Callable = None) -> int or dict: + # TODO support callbacks + callback = None executor = self._get_active_executor() executor._last_token = token = executor._last_token + 1 @@ -208,12 +214,14 @@ def get_last_results(self): return None return executor.results[-1] - def get_merged_nodegroup_results(self): + def get_merged_nodegroup_results(self, filter_tokens: Optional[List[int]] = None): """ Merges last tokenized results into NodeGroupResult. - The method is useful to check exceptions, or to check result in case only one command per node is executed. + The method is useful to check exceptions, to check result in case only one command per node is executed, + or to check result of specific commands in the batch if filter_tokens parameter is provided. + :param filter_tokens: tokens to filter result of specific commands in the batch :return: None or NodeGroupResult """ # TODO: Get rid of this WA import, added to avoid circular import problem @@ -229,14 +237,20 @@ def get_merged_nodegroup_results(self): "exited": 0 } object_result = None + + tokens = host_results.keys() if filter_tokens is None else filter_tokens for token, result in host_results.items(): + if isinstance(result, Exception): + object_result = result + continue + elif token not in tokens: + continue + if isinstance(result, fabric.runners.Result): if result.stdout: - merged_result['stdout'] = result.stdout if not merged_result['stdout'] \ - else merged_result['stdout'] + '\n\n' + result.stdout + merged_result['stdout'] += result.stdout if result.stderr: - merged_result['stderr'] = result.stderr if not merged_result['stderr'] \ - else merged_result['stderr'] + '\n\n' + result.stderr + merged_result['stderr'] += result.stderr # Exit codes can not be merged, that's why they are assigned by priority: # 1. 
Most important code is 1, it should be assigned if any results contains it @@ -301,7 +315,7 @@ def get_last_results_str(self): if output != "": output += "\n" - output += "\t%s (%s): code=%i" % (conn, token, result.exited) + output += "\t%s (%s): code=%i" % (conn.host, token, result.exited) if result.stdout: output += "\n\t\tSTDOUT: %s" % result.stdout.replace("\n", "\n\t\t ") if result.stderr: diff --git a/kubemarine/core/group.py b/kubemarine/core/group.py index 44ac34970..2d5325323 100755 --- a/kubemarine/core/group.py +++ b/kubemarine/core/group.py @@ -360,10 +360,10 @@ def _make_result_or_fail(self, results: _HostToResult, return group_result - def run(self, *args, **kwargs) -> NodeGroupResult: + def run(self, *args, **kwargs) -> NodeGroupResult or int: return self.do("run", *args, **kwargs) - def sudo(self, *args, **kwargs) -> NodeGroupResult: + def sudo(self, *args, **kwargs) -> NodeGroupResult or int: return self.do("sudo", *args, **kwargs) def put(self, local_file: Union[io.StringIO, str], remote_file: str, **kwargs): @@ -875,6 +875,12 @@ def get_nodes_names(self) -> List[str]: result.append(node['name']) return result + def get_node_name(self) -> str: + if len(self.nodes) != 1: + raise Exception("Cannot get the only name from not a single node") + + return self.get_first_member(provide_node_configs=True)['name'] + def get_hosts(self) -> List[str]: members = self.get_ordered_members_list(provide_node_configs=True) return [node['connect_to'] for node in members] diff --git a/kubemarine/packages.py b/kubemarine/packages.py index 88919dac9..a6d645caa 100644 --- a/kubemarine/packages.py +++ b/kubemarine/packages.py @@ -11,9 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from copy import deepcopy from typing import List, Dict +import fabric + from kubemarine import yum, apt from kubemarine.core.cluster import KubernetesCluster from kubemarine.core.executor import RemoteExecutor @@ -31,16 +33,22 @@ "Align them to the single version manually or using corresponding task of install procedure. " \ "Alternatively, specify cache_versions=false for corresponding association." +ERROR_SEMANAGE_NOT_MANAGED_DEBIAN = "semanage is not managed for debian OS family by KubeMarine" + def enrich_inventory_associations(inventory, cluster: KubernetesCluster): associations: dict = inventory['services']['packages']['associations'] - os_propagated_associations = {} + enriched_associations = {} - # move associations for OS families as-is + # Move associations for OS families and merge with globals for association_name in get_associations_os_family_keys(): - os_propagated_associations[association_name] = associations.pop(association_name) - - inventory['services']['packages']['associations'] = os_propagated_associations + os_associations: dict = deepcopy(cluster.globals['packages']['common_associations']) + if association_name == 'debian': + del os_associations['semanage'] + for association_params in os_associations.values(): + del association_params['groups'] + default_merger.merge(os_associations, associations.pop(association_name)) + enriched_associations[association_name] = os_associations # Check remained associations section if they are customized at global level. 
if associations: @@ -49,7 +57,12 @@ def enrich_inventory_associations(inventory, cluster: KubernetesCluster): raise Exception(ERROR_GLOBAL_ASSOCIATIONS_REDEFINED_MULTIPLE_OS) elif os_family not in ('unknown', 'unsupported'): # move remained associations properties to the specific OS family section and merge with priority - default_merger.merge(os_propagated_associations[os_family], associations) + default_merger.merge(enriched_associations[os_family], associations) + + if 'semanage' in enriched_associations['debian']: + raise Exception(ERROR_SEMANAGE_NOT_MANAGED_DEBIAN) + + inventory['services']['packages']['associations'] = enriched_associations return inventory @@ -74,7 +87,7 @@ def enrich_inventory_include_all(inventory: dict, _): return inventory -def cache_package_versions(cluster: KubernetesCluster, inventory: dict, ensured_associations_only=False) -> dict: +def cache_package_versions(cluster: KubernetesCluster, inventory: dict, by_initial_nodes=False) -> dict: os_ids = cluster.get_os_identifiers() different_os = list(set(os_ids.values())) if len(different_os) > 1: @@ -89,74 +102,127 @@ def cache_package_versions(cluster: KubernetesCluster, inventory: dict, ensured_ cluster.log.debug("Skip caching of packages for unsupported OS.") return inventory - nodes_cache_versions = cluster.nodes['all'].get_final_nodes().get_sudo_nodes() - if nodes_cache_versions.is_empty(): + group = cluster.nodes['all'].get_final_nodes() + if group.nodes_amount() != group.get_sudo_nodes().nodes_amount(): # For add_node/install procedures we check that all nodes are sudoers in prepare.check.sudoer task. - # For check_iaas procedure the nodes might still be not sudoers, so skip caching. - cluster.log.debug(f"There are no nodes with sudo privileges, packages will not be cached.") + # For check_iaas procedure the nodes might still be not sudoers. + # Skip caching if any not-sudoer node found. + cluster.log.debug(f"Some nodes are not sudoers, packages will not be cached.") return inventory - packages_list = _get_packages_to_detect_versions(cluster, inventory, ensured_associations_only) - detected_packages = detect_installed_packages_version_groups(nodes_cache_versions, packages_list) + if by_initial_nodes: + group = group.get_initial_nodes() - _cache_package_associations(cluster, inventory, detected_packages, ensured_associations_only) - _cache_custom_packages(cluster, inventory, detected_packages, ensured_associations_only) + hosts_to_packages = get_all_managed_packages_for_group(group, inventory, by_initial_nodes) + detected_packages = detect_installed_packages_version_hosts(cluster, hosts_to_packages) + + _cache_package_associations(group, inventory, detected_packages, by_initial_nodes) + _cache_custom_packages(cluster, inventory, detected_packages, by_initial_nodes) cluster.log.debug('Package versions detection finished') return inventory -def _get_associations(cluster: KubernetesCluster, inventory: dict): - return inventory['services']['packages']['associations'][cluster.get_os_family()] +def get_all_managed_packages_for_group(group: NodeGroup, inventory: dict, ensured_association_only: bool = False) \ + -> Dict[str, List[str]]: + """ + Returns hosts with list of all managed packages for them. + For associations, only subset of hosts is considered on which the associations are managed by KubeMarine. + + :param group: Group of nodes to get the manager packages for. + :param inventory: Inventory of the cluster. May be different from the inventory of the cluster instance, + if used during finalization. 
+ :param ensured_association_only: Specify whether to take 'cache_versions' property into account for associations. + Additionally, if true, will skip custom packages. + :return: List of packages for each relevant host. + """ + packages_section = inventory['services']['packages'] + hosts_to_packages = {} + for node in group.get_ordered_members_list(): + os_family = node.get_nodes_os() + node_associations = packages_section['associations'].get(os_family, {}) + for association_name in node_associations.keys(): + packages = get_association_hosts_to_packages( + node, inventory, association_name, ensured_association_only) + + packages = next(iter(packages.values()), []) + hosts_to_packages.setdefault(node.get_host(), []).extend(packages) + + custom_install_packages = inventory['services']['packages'].get('install', {}).get('include', []) + if not ensured_association_only and custom_install_packages: + for host in group.get_hosts(): + hosts_to_packages.setdefault(host, []).extend(custom_install_packages) + + return hosts_to_packages -def _get_package_names_for_association(cluster: KubernetesCluster, inventory: dict, association_name: str) -> list: - if association_name in get_associations_os_family_keys(): - return [] +def get_association_hosts_to_packages(group: NodeGroup, inventory: dict, association_name: str, + ensured_association_only: bool = False) \ + -> Dict[str, List[str]]: + """ + Returns hosts with associated packages list for the specified association name. + Only subset of hosts is returned on which the association is managed by KubeMarine. + + :param group: Group of nodes to check the applicability of the association. + :param inventory: Inventory of the cluster. May be different from the inventory of the cluster instance, + if used during finalization. + :param association_name: target association name + :param ensured_association_only: Specify whether to take 'cache_versions' property into account. + :return: List of packages for each relevant host. 
+ """ + cluster = group.cluster - associated_packages = _get_associations(cluster, inventory)[association_name].get('package_name') - if isinstance(associated_packages, str): - associated_packages = [associated_packages] - elif not isinstance(associated_packages, list): - raise Exception('Unsupported associated packages object type') + packages_section = inventory['services']['packages'] + if not packages_section['mandatory'].get(association_name, True): + return {} - return associated_packages + hosts_to_packages = {} + if association_name == 'unzip': + from kubemarine import thirdparties + relevant_group = thirdparties.get_group_require_unzip(cluster, inventory) + else: + groups = cluster.globals['packages']['common_associations'].get(association_name, {}).get('groups', []) + relevant_group = cluster.create_group_from_groups_nodes_names(groups, []) -def _get_packages_for_associations_to_detect(cluster: KubernetesCluster, inventory: dict, association_name: str, - ensured_association_only: bool) -> list: - packages_list = _get_package_names_for_association(cluster, inventory, association_name) - if not packages_list: - return [] + if association_name in ('docker', 'containerd') \ + and association_name != inventory['services']['cri']['containerRuntime']: + relevant_group = cluster.make_group([]) - global_cache_versions = inventory['services']['packages']['cache_versions'] - associated_params = _get_associations(cluster, inventory)[association_name] - if not ensured_association_only or (global_cache_versions and associated_params.get('cache_versions', True)): - return packages_list + relevant_group = relevant_group.intersection_group(group) - return [] + global_cache_versions = packages_section['cache_versions'] + for node in relevant_group.get_ordered_members_list(): + os_family = node.get_nodes_os() + package_associations = packages_section['associations'].get(os_family, {}).get(association_name, {}) + packages = package_associations.get('package_name', []) + if isinstance(packages, str): + packages = [packages] -def _get_packages_to_detect_versions(cluster: KubernetesCluster, inventory: dict, ensured_association_only: bool) -> list: - packages_list = [] - for association_name in _get_associations(cluster, inventory).keys(): - packages_list.extend(_get_packages_for_associations_to_detect( - cluster, inventory, association_name, ensured_association_only)) + if ensured_association_only and not (global_cache_versions and package_associations.get('cache_versions', True)): + packages = [] - if not ensured_association_only and inventory['services']['packages'].get('install', {}): - packages_list.extend(inventory['services']['packages']['install']['include']) + if packages: + hosts_to_packages[node.get_host()] = packages - return packages_list + return hosts_to_packages -def _cache_package_associations(cluster: KubernetesCluster, inventory: dict, +def _cache_package_associations(group: NodeGroup, inventory: dict, detected_packages: Dict[str, Dict[str, List]], ensured_association_only: bool): - for association_name, associated_params in _get_associations(cluster, inventory).items(): - packages_list = _get_packages_for_associations_to_detect( - cluster, inventory, association_name, ensured_association_only) - if not packages_list: + cluster = group.cluster + associations = inventory['services']['packages']['associations'][cluster.get_os_family()] + for association_name, associated_params in associations.items(): + hosts_to_packages = get_association_hosts_to_packages( + group, inventory, 
association_name, ensured_association_only) + if not hosts_to_packages: continue + # Since all nodes have the same OS family in this case, + # the packages list is the same for all relevant hosts, so take any available. + packages_list = next(iter(hosts_to_packages.values())) + final_packages_list = [] for package in packages_list: final_package = _detect_final_package(cluster, detected_packages, package, ensured_association_only) @@ -177,13 +243,12 @@ def _cache_custom_packages(cluster: KubernetesCluster, inventory: dict, return # packages from direct installation section custom_install_packages = inventory['services']['packages'].get('install', {}) - if custom_install_packages: + if custom_install_packages.get('include', []): final_packages_list = [] for package in custom_install_packages['include']: final_package = _detect_final_package(cluster, detected_packages, package, False) final_packages_list.append(final_package) custom_install_packages['include'] = final_packages_list - return detected_packages def _detect_final_package(cluster: KubernetesCluster, detected_packages: Dict[str, Dict[str, List]], @@ -273,7 +338,7 @@ def get_detect_package_version_cmd(os_family: str, package_name: str) -> str: return cmd -def detect_installed_package_version(group: NodeGroup, package: str) -> NodeGroupResult: +def _detect_installed_package_version(group: NodeGroup, package: str) -> NodeGroupResult: """ Detect package versions for each host on remote group :param group: Group of nodes, where package should be found @@ -293,43 +358,52 @@ def detect_installed_package_version(group: NodeGroup, package: str) -> NodeGrou return group.sudo(cmd) -def detect_installed_packages_version_groups(group: NodeGroup, packages_list: List or str) -> Dict[str, Dict[str, List]]: +def _parse_node_detected_package(result: fabric.runners.Result, package: str) -> str: + node_detected_package = result.stdout.strip() + result.stderr.strip() + # consider version, which ended with special symbol = or - as not installed + # (it is possible in some cases to receive "containerd=" version) + if "not installed" in node_detected_package or "no packages found" in node_detected_package \ + or node_detected_package[-1] == '=' or node_detected_package[-1] == '-': + node_detected_package = f"not installed {package}" + + return node_detected_package + + +def detect_installed_packages_version_hosts(cluster: KubernetesCluster, hosts_to_packages: Dict[str, List[str]]) \ + -> Dict[str, Dict[str, List]]: """ - Detect grouped packages versions on remote group from specified list of packages. - :param group: Group of nodes, where packages should be found - :param packages_list: Single package or list of packages, which versions should be detected. + Detect grouped packages versions for specified list of packages for each remote host. + + :param cluster: KubernetesCluster instance + :param hosts_to_packages: Remote hosts with list of packages to detect versions. :return: Dictionary with grouped versions for each queried package, pointing to list of hosts, e.g. 
{"foo" -> {"foo-1": [host1, host2]}, "bar" -> {"bar-1": [host1], "bar-2": [host2]}} """ - - cluster = group.cluster - - if isinstance(packages_list, str): - packages_list = [packages_list] - # deduplicate - packages_list = list(set(packages_list)) - if not packages_list: - return {} + for host, packages_list in hosts_to_packages.items(): + if isinstance(packages_list, str): + packages_list = [packages_list] + # deduplicate + hosts_to_packages[host] = list(set(packages_list)) with RemoteExecutor(cluster) as exe: - for package in packages_list: - detect_installed_package_version(group, package) + for host, packages_list in hosts_to_packages.items(): + node = cluster.make_group([host]) + for package in packages_list: + _detect_installed_package_version(node, package) raw_result = exe.get_last_results() + if not raw_result: + return {} + results: Dict[str, Dict[str, List]] = {} - for i, package in enumerate(packages_list): - detected_grouped_packages = {} - for conn, multiple_results in raw_result.items(): - node_detected_package = multiple_results[i].stdout.strip() + multiple_results[i].stderr.strip() - # consider version, which ended with special symbol = or - as not installed - # (it is possible in some cases to receive "containerd=" version) - if "not installed" in node_detected_package or "no packages found" in node_detected_package \ - or node_detected_package[-1] == '=' or node_detected_package[-1] == '-': - node_detected_package = f"not installed {package}" - detected_grouped_packages.setdefault(node_detected_package, []).append(conn.host) - - results[package] = detected_grouped_packages + for conn, multiple_results in raw_result.items(): + multiple_results = list(multiple_results.values()) + host = conn.host + packages_list = hosts_to_packages[host] + for i, package in enumerate(packages_list): + node_detected_package = _parse_node_detected_package(multiple_results[i], package) + results.setdefault(package, {}).setdefault(node_detected_package, []).append(host) return results diff --git a/kubemarine/patches/__init__.py b/kubemarine/patches/__init__.py index 0a3054637..4f51197c0 100644 --- a/kubemarine/patches/__init__.py +++ b/kubemarine/patches/__init__.py @@ -22,7 +22,9 @@ from typing import List from kubemarine.core.patch import Patch +from kubemarine.patches.p1_mandatory_packages_off import MandatoryPackagesOff patches: List[Patch] = [ + MandatoryPackagesOff() ] """List of patches which can be executed strictly in the declared order""" diff --git a/kubemarine/patches/p1_mandatory_packages_off.py b/kubemarine/patches/p1_mandatory_packages_off.py new file mode 100644 index 000000000..dc59c949b --- /dev/null +++ b/kubemarine/patches/p1_mandatory_packages_off.py @@ -0,0 +1,50 @@ +# Copyright 2021-2022 NetCracker Technology Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from textwrap import dedent + +from kubemarine.core import static +from kubemarine.core.action import Action +from kubemarine.core.patch import Patch +from kubemarine.core.resources import DynamicResources + + +class TheAction(Action): + def __init__(self): + super().__init__("Turn mandatory packages off for backward compatibility", recreate_inventory=True) + + def run(self, res: DynamicResources): + for package in static.DEFAULTS["services"]["packages"]['mandatory'].keys(): + res.formatted_inventory().setdefault('services', {}).setdefault('packages', {})\ + .setdefault('mandatory', {})[package] = False + + +class MandatoryPackagesOff(Patch): + def __init__(self): + super().__init__("mandatory_packages_off") + + @property + def action(self) -> Action: + return TheAction() + + @property + def description(self) -> str: + return dedent( + f"""\ + New KubeMarine automatically installs and manages all mandatory packages. + Old users had to specify them inside services.packages.install section. + For backward compatibility KubeMarine should not manage mandatory packages for old clusters. + The patch turns off the management in the inventory. + """.rstrip() + ) diff --git a/kubemarine/plugins/__init__.py b/kubemarine/plugins/__init__.py index 95de5bbef..5c5c36179 100755 --- a/kubemarine/plugins/__init__.py +++ b/kubemarine/plugins/__init__.py @@ -546,8 +546,8 @@ def verify_thirdparty(cluster, thirdparty): % (thirdparty, defined_thirdparties)) -def apply_thirdparty(cluster, thirdparty, plugin_name=None): - return thirdparties.install_thirdparty(cluster, thirdparty) +def apply_thirdparty(cluster: KubernetesCluster, thirdparty: str, plugin_name=None): + return thirdparties.install_thirdparty(cluster.nodes['all'], thirdparty) # **** SHELL **** @@ -562,8 +562,10 @@ def convert_shell(cluster, config): def verify_shell(cluster, config): out_vars = config.get('out_vars', []) - explicit_group = cluster.create_group_from_groups_nodes_names(config.get('groups', []), config.get('nodes', [])) - if out_vars and explicit_group and explicit_group.nodes_amount() != 1: + groups = config.get('groups', []) + nodes = config.get('nodes', []) + explicit_group = cluster.create_group_from_groups_nodes_names(groups, nodes) + if out_vars and (groups or nodes) and explicit_group.nodes_amount() != 1: raise Exception('Shell output variables could be used for single-node groups, but multi-node group was found') in_vars = config.get('in_vars', []) diff --git a/kubemarine/procedures/add_node.py b/kubemarine/procedures/add_node.py index f90357dcc..810d88cdd 100755 --- a/kubemarine/procedures/add_node.py +++ b/kubemarine/procedures/add_node.py @@ -96,7 +96,7 @@ def cache_installed_packages(cluster: KubernetesCluster): It is called first during "add_node" procedure, so that new nodes install exactly the same packages as on other already existing nodes. 
""" - packages.cache_package_versions(cluster, cluster.inventory, ensured_associations_only=True) + packages.cache_package_versions(cluster, cluster.inventory, by_initial_nodes=True) tasks = OrderedDict(copy.deepcopy(install.tasks)) diff --git a/kubemarine/procedures/check_iaas.py b/kubemarine/procedures/check_iaas.py index f3d819f34..ceda1a5a3 100755 --- a/kubemarine/procedures/check_iaas.py +++ b/kubemarine/procedures/check_iaas.py @@ -429,14 +429,14 @@ def check_access_to_package_repositories(cluster: KubernetesCluster): all_group.put(local_path, random_temp_path, binary=False) if repository_urls: - with RemoteExecutor(cluster, ignore_failed=True) as exe: + with RemoteExecutor(cluster) as exe: for node in all_group.get_ordered_members_list(provide_node_configs=True): # Check with script python_executable = cluster.context['nodes'][node['connect_to']]['python']['executable'] for repository_url in repository_urls: node['connection'].run('%s %s %s %s || echo "Package repository is unavailable"' % (python_executable, random_temp_path, repository_url, - cluster.inventory['timeout_download']), warn=True) + cluster.inventory['timeout_download'])) for conn, url_results in exe.get_last_results().items(): # Check if resolv.conf is actual @@ -464,46 +464,21 @@ def check_access_to_package_repositories(cluster: KubernetesCluster): def check_access_to_packages(cluster: KubernetesCluster): - def get_packages_to_install(node): - ''' - This function get list of packages, that should be installed on node - ''' - packages_to_install = copy(cluster.inventory["services"]["packages"].get("install", {}).get('include', [])) - packages_associations = [] - - # Add docker or containerd - if cluster.inventory['services']['cri']['containerRuntime'] == 'docker': - packages_associations.append('docker') - else: - packages_associations.append('containerd') - # Add haproxy for balancer nodes - if 'balancer' in node['roles']: - packages_associations.append('haproxy') - # Add keepalived for keepalived nodes with vrrp_ips enabled - if cluster.inventory.get('vrrp_ips') and 'keepalived' in node['roles']: - packages_associations.append('keepalived') - # Add audit for debian - if node['connection'].get_nodes_os() not in ['rhel', 'rhel8']: - packages_associations.append('audit') - - for association in packages_associations: - package_name = cluster.get_package_association_for_node(node['connect_to'], association, 'package_name') - if isinstance(package_name, list): - packages_to_install.extend(package_name) - else: - packages_to_install.append(package_name) - return packages_to_install - with TestCase(cluster.context['testsuite'], '014', 'Software', 'Package Availability') as tc: check_package_repositories(cluster) broken = [] warnings = [] - with RemoteExecutor(cluster, ignore_failed=True) as exe: - for node in cluster.nodes['all'].get_ordered_members_list(provide_node_configs=True): - packages_to_check = get_packages_to_install(node) - cluster.log.debug(f"Packages to check for node {node['connect_to']}: {packages_to_check}") + group = cluster.nodes['all'] + hosts_to_packages = packages.get_all_managed_packages_for_group(group, cluster.inventory) + with RemoteExecutor(cluster) as exe: + for host, packages_to_check in hosts_to_packages.items(): + packages_to_check = list(set(packages_to_check)) + hosts_to_packages[host] = packages_to_check + cluster.log.debug(f"Packages to check for node {host}: {packages_to_check}") + + node = cluster.make_group([host]) for package in packages_to_check: - 
packages.search_package(node['connection'], package, warn=True) + packages.search_package(node, package) # Check packages from install section for conn, results in exe.get_last_results().items(): @@ -515,7 +490,7 @@ def get_packages_to_install(node): problem_handler = warnings else: problem_handler = broken - packages_to_check = get_packages_to_install(cluster.get_node(conn)) + packages_to_check = hosts_to_packages[conn.host] for i, result in enumerate(results.values()): if "Package is unavailable" in result.stdout: problem_handler.append(f"Package {packages_to_check[i]} is unavailable for node {conn.host}") diff --git a/kubemarine/procedures/check_paas.py b/kubemarine/procedures/check_paas.py index a3fc2e656..4b0894e19 100755 --- a/kubemarine/procedures/check_paas.py +++ b/kubemarine/procedures/check_paas.py @@ -17,7 +17,7 @@ import time from collections import OrderedDict import re -from typing import List +from typing import List, Dict import yaml import ruamel.yaml @@ -184,20 +184,33 @@ def system_packages_versions(cluster: KubernetesCluster, pckg_alias: str): """ with TestCase(cluster.context['testsuite'], '205', "Services", f"{pckg_alias} version") as tc: _check_same_os(cluster) - if pckg_alias == "docker" or pckg_alias == "containerd": - group = cluster.nodes['control-plane'].include_group(cluster.nodes.get('worker')) - elif pckg_alias == "keepalived" or pckg_alias == "haproxy": - if "balancer" in cluster.nodes and not cluster.nodes['balancer'].is_empty(): - group = cluster.nodes['balancer'] - else: - raise TestWarn("balancer group is not present") - else: - raise Exception(f"Unknown system package alias: {pckg_alias}") + hosts_to_packages = pckgs.get_association_hosts_to_packages(cluster.nodes['all'], cluster.inventory, pckg_alias) + if not hosts_to_packages: + raise TestWarn(f"No nodes to check {pckg_alias!r} version") + + return check_packages_versions(cluster, tc, hosts_to_packages) + + +def mandatory_packages_versions(cluster: KubernetesCluster): + """ + Verifies that mandatory packages are installed on required nodes and have equal versions. + Failure is shown if check is not successful. + :param cluster: main cluster object. 
+    """
+    with TestCase(cluster.context['testsuite'], '205', "Services", "Mandatory package versions") as tc:
+        _check_same_os(cluster)
+        hosts_to_packages = {}
+        group = cluster.nodes['all']
+        for package in cluster.inventory["services"]["packages"]['mandatory'].keys():
+            packages = pckgs.get_association_hosts_to_packages(group, cluster.inventory, package)
+
+            for host, packages_list in packages.items():
+                hosts_to_packages.setdefault(host, []).extend(packages_list)
+
+        if not hosts_to_packages:
+            raise TestWarn("No mandatory packages to check")
 
-        packages = cluster.get_package_association(pckg_alias, 'package_name')
-        if not isinstance(packages, list):
-            packages = [packages]
-        return check_packages_versions(cluster, tc, group, packages)
+        return check_packages_versions(cluster, tc, hosts_to_packages)
 
 
 def generic_packages_versions(cluster: KubernetesCluster):
@@ -207,23 +220,23 @@ def generic_packages_versions(cluster: KubernetesCluster):
     """
     with TestCase(cluster.context['testsuite'], '206', "Services", f"Generic packages version") as tc:
         _check_same_os(cluster)
-        packages = cluster.inventory['services']['packages']['install']['include']
-        return check_packages_versions(cluster, tc, cluster.nodes['all'], packages, warn_on_bad_result=True)
+        packages = cluster.inventory['services']['packages'].get('install', {}).get('include', [])
+        hosts_to_packages = {host: packages for host in cluster.nodes['all'].get_hosts()}
+        return check_packages_versions(cluster, tc, hosts_to_packages, warn_on_bad_result=True)
 
 
-def check_packages_versions(cluster, tc, group, packages, warn_on_bad_result=False):
+def check_packages_versions(cluster, tc, hosts_to_packages: Dict[str, List[str]], warn_on_bad_result=False):
     """
     Verifies that all packages are installed on required nodes and have equal versions
     :param cluster: main cluster object
     :param tc: current test case object
-    :param group: nodes where to check packages
-    :param packages: list of packages to check
+    :param hosts_to_packages: mapping of each host to the list of packages to check on it
     :param warn_on_bad_result: if true then uses Warning instead of Failure. Default False.
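+    Example of hosts_to_packages (illustrative addresses): {'10.101.1.2': ['curl', 'unzip']}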
""" bad_results = [] good_results = [] - packages_map = pckgs.detect_installed_packages_version_groups(group, packages) + packages_map = pckgs.detect_installed_packages_version_hosts(cluster, hosts_to_packages) for package, version_map in packages_map.items(): if len(version_map) != 1: cluster.log.debug(f"Package {package} has different versions:") @@ -372,6 +385,7 @@ def thirdparties_hashes(cluster): # SHA is correct, now check if it is an archive and if it does, then also check SHA for archive content if 'unpack' in config: unpack_dir = config['unpack'] + # TODO support zip res = group.sudo('tar tf %s | grep -vw "./" | while read file_name; do ' # for each file in archive ' echo ${file_name} ' # print 1) filename ' $(sudo tar xfO %s ${file_name} | openssl sha1 | cut -d\\ -f2) ' # 2) sha archive @@ -617,7 +631,7 @@ def verify_selinux_status(cluster: KubernetesCluster) -> None: :param cluster: KubernetesCluster object :return: None """ - if cluster.get_os_family() == 'debian': + if cluster.get_os_family() not in ('rhel', 'rhel8'): return with TestCase(cluster.context['testsuite'], '213', "Security", "Selinux security policy") as tc: @@ -676,7 +690,7 @@ def verify_selinux_config(cluster: KubernetesCluster) -> None: :param cluster: KubernetesCluster object :return: None """ - if cluster.get_os_family() == 'debian': + if cluster.get_os_family() not in ('rhel', 'rhel8'): return with TestCase(cluster.context['testsuite'], '214', "Security", "Selinux configuration") as tc: @@ -1311,7 +1325,9 @@ def verify_apparmor_config(cluster: KubernetesCluster) -> None: 'cri_version': lambda cluster: system_packages_versions(cluster, cluster.inventory['services']['cri'][ 'containerRuntime']), 'haproxy_version': lambda cluster: system_packages_versions(cluster, 'haproxy'), - 'keepalived_version': lambda cluster: system_packages_versions(cluster, 'keepalived') + 'keepalived_version': lambda cluster: system_packages_versions(cluster, 'keepalived'), + 'audit_version': lambda cluster: system_packages_versions(cluster, 'audit'), + 'mandatory_versions': mandatory_packages_versions }, 'generic': { 'version': generic_packages_versions diff --git a/kubemarine/procedures/do.py b/kubemarine/procedures/do.py index 3b672cdd5..38f9d10b1 100755 --- a/kubemarine/procedures/do.py +++ b/kubemarine/procedures/do.py @@ -40,7 +40,7 @@ def __init__(self, node_group_provider: Callable[[KubernetesCluster], NodeGroup] def run(self, res: DynamicResources): executors_group = self.node_group_provider(res.cluster()) - if not executors_group or executors_group.nodes_amount() < 1: + if executors_group.is_empty(): print('Failed to find any of specified nodes or groups') sys.exit(1) diff --git a/kubemarine/procedures/install.py b/kubemarine/procedures/install.py index 5390af63a..260e54cae 100755 --- a/kubemarine/procedures/install.py +++ b/kubemarine/procedures/install.py @@ -165,12 +165,12 @@ def system_prepare_system_modprobe(group: NodeGroup): @_applicable_for_new_nodes_with_roles('control-plane', 'worker') def system_install_audit(group: NodeGroup): - group.cluster.log.debug(group.call(audit.install)) + group.call(audit.install) @_applicable_for_new_nodes_with_roles('control-plane', 'worker') def system_prepare_audit_daemon(group: NodeGroup): - group.cluster.log.debug(group.call(audit.apply_audit_rules)) + group.call(audit.apply_audit_rules) @_applicable_for_new_nodes_with_roles('control-plane') @@ -276,6 +276,30 @@ def system_prepare_package_manager_configure(group: NodeGroup): @_applicable_for_new_nodes_with_roles('all') def 
system_prepare_package_manager_manage_packages(group: NodeGroup): + group.call_batch([ + manage_mandatory_packages, + manage_custom_packages + ]) + + +def manage_mandatory_packages(group: NodeGroup): + cluster = group.cluster + + with RemoteExecutor(cluster) as exe: + for node in group.get_ordered_members_list(): + pkgs = [] + for package in cluster.inventory["services"]["packages"]['mandatory'].keys(): + hosts_to_packages = packages.get_association_hosts_to_packages(node, cluster.inventory, package) + pkgs.extend(next(iter(hosts_to_packages.values()), [])) + + if pkgs: + cluster.log.debug(f"Installing {pkgs} on {node.get_node_name()!r}") + packages.install(node, pkgs) + + return exe.get_merged_result() + + +def manage_custom_packages(group: NodeGroup): cluster = group.cluster batch_tasks = [] batch_parameters = {} diff --git a/kubemarine/procedures/upgrade.py b/kubemarine/procedures/upgrade.py index ea0b240e0..c7019fecf 100755 --- a/kubemarine/procedures/upgrade.py +++ b/kubemarine/procedures/upgrade.py @@ -86,12 +86,12 @@ def kubernetes_cleanup_nodes_versions(cluster): kubernetes_apply_taints(cluster) -def upgrade_packages(cluster): +def upgrade_packages(cluster: KubernetesCluster): upgrade_version = cluster.context["upgrade_version"] packages = cluster.procedure_inventory.get(upgrade_version, {}).get("packages", {}) if packages.get("install") is not None or packages.get("upgrade") is not None or packages.get("remove") is not None: - install.system_prepare_package_manager_manage_packages(cluster) + install.manage_custom_packages(cluster.nodes['all']) def upgrade_plugins(cluster): diff --git a/kubemarine/resources/configurations/defaults.yaml b/kubemarine/resources/configurations/defaults.yaml index 30807e8c8..85c25b047 100644 --- a/kubemarine/resources/configurations/defaults.yaml +++ b/kubemarine/resources/configurations/defaults.yaml @@ -312,103 +312,81 @@ services: packages: cache_versions: true + mandatory: + conntrack: true + iptables: true + openssl: true + curl: true + unzip: true + semanage: true + kmod: true package_manager: replace-repositories: false associations: debian: docker: - executable_name: 'docker' package_name: - 'docker-ce={{ globals.compatibility_map.software.docker[services.kubeadm.kubernetesVersion].version_debian }}' - 'docker-ce-cli={{ globals.compatibility_map.software.docker[services.kubeadm.kubernetesVersion].version_debian }}' - 'containerd.io={{ globals.compatibility_map.software.containerdio[services.kubeadm.kubernetesVersion].version_debian }}' - service_name: 'docker' - config_location: '/etc/docker/daemon.json' containerd: - executable_name: 'containerd' package_name: - 'containerd={{ globals.compatibility_map.software.containerd[services.kubeadm.kubernetesVersion].version_debian }}' - 'podman={{ globals.compatibility_map.software.podman[services.kubeadm.kubernetesVersion].version_debian }}' - service_name: 'containerd' - config_location: '/etc/containerd/config.toml' haproxy: executable_name: 'haproxy' package_name: 'haproxy={{ globals.compatibility_map.software.haproxy[services.kubeadm.kubernetesVersion].version_debian }}' service_name: 'haproxy' - config_location: '/etc/haproxy/haproxy.cfg' keepalived: - executable_name: 'keepalived' package_name: 'keepalived={{ globals.compatibility_map.software.keepalived[services.kubeadm.kubernetesVersion].version_debian }}' - service_name: 'keepalived' - config_location: '/etc/keepalived/keepalived.conf' audit: - executable_name: 'auditctl' package_name: 'auditd' - service_name: 'auditd' - config_location: 
'/etc/audit/rules.d/predefined.rules' + conntrack: + package_name: 'conntrack' rhel: docker: - executable_name: 'docker' package_name: - 'docker-ce-{{ globals.compatibility_map.software.docker[services.kubeadm.kubernetesVersion].version_rhel }}' - 'docker-ce-cli-{{ globals.compatibility_map.software.docker[services.kubeadm.kubernetesVersion].version_rhel }}' - 'containerd.io-{{ globals.compatibility_map.software.containerdio[services.kubeadm.kubernetesVersion].version_rhel }}' - service_name: 'docker' - config_location: '/etc/docker/daemon.json' containerd: - executable_name: 'containerd' package_name: - 'containerd.io-{{ globals.compatibility_map.software.containerdio[services.kubeadm.kubernetesVersion].version_rhel }}' - 'podman-{{ globals.compatibility_map.software.podman[services.kubeadm.kubernetesVersion].version_rhel }}' - service_name: 'containerd' - config_location: '/etc/containerd/config.toml' haproxy: executable_name: '/opt/rh/rh-haproxy18/root/usr/sbin/haproxy' package_name: 'rh-haproxy18-haproxy-{{ globals.compatibility_map.software.haproxy[services.kubeadm.kubernetesVersion].version_rhel }}' service_name: 'rh-haproxy18-haproxy' - config_location: '/etc/haproxy/haproxy.cfg' keepalived: - executable_name: 'keepalived' package_name: 'keepalived-{{ globals.compatibility_map.software.keepalived[services.kubeadm.kubernetesVersion].version_rhel }}' - service_name: 'keepalived' - config_location: '/etc/keepalived/keepalived.conf' audit: - executable_name: 'auditctl' package_name: 'audit' - service_name: 'auditd' - config_location: '/etc/audit/rules.d/predefined.rules' + conntrack: + package_name: 'conntrack-tools' + semanage: + package_name: 'policycoreutils-python' rhel8: docker: - executable_name: 'docker' package_name: - 'docker-ce-{{ globals.compatibility_map.software.docker[services.kubeadm.kubernetesVersion].version_rhel8 }}' - 'docker-ce-cli-{{ globals.compatibility_map.software.docker[services.kubeadm.kubernetesVersion].version_rhel8 }}' - 'containerd.io-{{ globals.compatibility_map.software.containerdio[services.kubeadm.kubernetesVersion].version_rhel8 }}' - service_name: 'docker' - config_location: '/etc/docker/daemon.json' containerd: - executable_name: 'containerd' package_name: - 'containerd.io-{{ globals.compatibility_map.software.containerdio[services.kubeadm.kubernetesVersion].version_rhel8 }}' - 'podman-{{ globals.compatibility_map.software.podman[services.kubeadm.kubernetesVersion].version_rhel8 }}' - service_name: 'containerd' - config_location: '/etc/containerd/config.toml' haproxy: executable_name: '/usr/sbin/haproxy' package_name: 'haproxy-{{ globals.compatibility_map.software.haproxy[services.kubeadm.kubernetesVersion].version_rhel8 }}' service_name: 'haproxy' - config_location: '/etc/haproxy/haproxy.cfg' keepalived: - executable_name: 'keepalived' package_name: 'keepalived-{{ globals.compatibility_map.software.keepalived[services.kubeadm.kubernetesVersion].version_rhel8 }}' - service_name: 'keepalived' - config_location: '/etc/keepalived/keepalived.conf' audit: - executable_name: 'auditctl' package_name: 'audit' - service_name: 'auditd' - config_location: '/etc/audit/rules.d/predefined.rules' - + conntrack: + package_name: 'conntrack-tools' + semanage: + package_name: 'policycoreutils-python-utils' plugin_defaults: installation: {} diff --git a/kubemarine/resources/configurations/globals.yaml b/kubemarine/resources/configurations/globals.yaml index 06a6861fd..ea84d5ca7 100644 --- a/kubemarine/resources/configurations/globals.yaml +++ 
b/kubemarine/resources/configurations/globals.yaml @@ -116,6 +116,63 @@ thirdparties: /usr/bin/crictl.tar.gz: software_name: crictl +packages: + common_associations: + docker: + executable_name: 'docker' + service_name: 'docker' + config_location: '/etc/docker/daemon.json' + groups: + - control-plane + - worker + containerd: + executable_name: 'containerd' + service_name: 'containerd' + config_location: '/etc/containerd/config.toml' + groups: + - control-plane + - worker + haproxy: + config_location: '/etc/haproxy/haproxy.cfg' + groups: + - balancer + keepalived: + executable_name: 'keepalived' + service_name: 'keepalived' + config_location: '/etc/keepalived/keepalived.conf' + groups: + - keepalived + audit: + executable_name: 'auditctl' + service_name: 'auditd' + config_location: '/etc/audit/rules.d/predefined.rules' + groups: + - control-plane + - worker + conntrack: + groups: + - control-plane + - worker + iptables: + package_name: 'iptables' + groups: + - control-plane + - worker + openssl: + package_name: 'openssl' + groups: [ control-plane, worker, balancer ] + curl: + package_name: 'curl' + groups: [ control-plane, worker, balancer ] + unzip: + package_name: 'unzip' + groups: [] + kmod: + package_name: 'kmod' + groups: [ control-plane, worker, balancer ] + semanage: + groups: [ control-plane, worker, balancer ] + compatibility_map: software: docker: diff --git a/kubemarine/resources/schemas/definitions/services/packages.json b/kubemarine/resources/schemas/definitions/services/packages.json index f831df038..060556b38 100644 --- a/kubemarine/resources/schemas/definitions/services/packages.json +++ b/kubemarine/resources/schemas/definitions/services/packages.json @@ -4,6 +4,40 @@ "description": "Section for packages and their management", "allOf": [{"$ref": "#/definitions/Properties"}], "properties": { + "package_manager": { + "type": "object", + "description": "Additional package manager repositories for the cluster in closed environment", + "properties": { + "replace-repositories": { + "type": "boolean", + "default": false, + "description": "Deletes old repositories on hosts and installs new ones instead" + }, + "repositories": { + "description": "List of new repositories. Can be specified as string with all repositories content, or as list for apt repositories, or as dictionary for yum repositories.", + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/YumRepositories"}, + {"$ref": "#/definitions/AptRepositories"} + ] + } + }, + "additionalProperties": false + }, + "mandatory": { + "type": "object", + "description": "Specify whether to install or skip mandatory packages", + "properties": { + "conntrack": {"type": "boolean", "default": true}, + "iptables": {"type": "boolean", "default": true}, + "openssl": {"type": "boolean", "default": true}, + "curl": {"type": "boolean", "default": true}, + "unzip": {"type": "boolean", "default": true}, + "semanage": {"type": "boolean", "default": true}, + "kmod": {"type": "boolean", "default": true} + }, + "additionalProperties": false + }, "associations": { "$ref": "packages/associations.json", "description": "Configure predefined associations of package objects. If configured in the common section, the associations are automatically switched to the section corresponding to your operating system." 
@@ -12,32 +46,12 @@ "propertyNames": { "anyOf": [ {"$ref": "#/definitions/PropertyNames"}, - {"enum": ["associations"]} + {"enum": ["package_manager", "mandatory", "associations"]} ] }, "definitions": { "Properties": { "properties": { - "package_manager": { - "type": "object", - "description": "Additional package manager repositories for the cluster in closed environment", - "properties": { - "replace-repositories": { - "type": "boolean", - "default": false, - "description": "Deletes old repositories on hosts and installs new ones instead" - }, - "repositories": { - "description": "List of new repositories. Can be specified as string with all repositories content, or as list for apt repositories, or as dictionary for yum repositories.", - "oneOf": [ - {"type": "string"}, - {"$ref": "#/definitions/YumRepositories"}, - {"$ref": "#/definitions/AptRepositories"} - ] - } - }, - "additionalProperties": false - }, "install": { "description": "List of custom packages to install. Can be specified as list or as two lists that are to be included and excluded during processing.", "oneOf": [ @@ -76,7 +90,7 @@ } }, "PropertyNames": { - "enum": ["package_manager", "install", "upgrade", "remove", "cache_versions"] + "enum": ["install", "upgrade", "remove", "cache_versions"] }, "YumRepositories": { "type": "object", @@ -91,7 +105,7 @@ } }, "AptRepositories": { - "$ref": "../common/utils.json#/definitions/MergeableSetOfStrings" + "$ref": "../common/utils.json#/definitions/NonEmptySetOfStrings" }, "IncludeExcludePermissive": { "type": "object", diff --git a/kubemarine/resources/schemas/definitions/services/packages/associations.json b/kubemarine/resources/schemas/definitions/services/packages/associations.json index 0de330d88..843819034 100644 --- a/kubemarine/resources/schemas/definitions/services/packages/associations.json +++ b/kubemarine/resources/schemas/definitions/services/packages/associations.json @@ -14,14 +14,8 @@ ] }, "definitions": { - "PackageAssociations": { - "type": "object", - "description": "Associations related to specific package", + "PackageAssociationsProperties": { "properties": { - "executable_name": { - "type": "string", - "description": "Path to or name of the associated binary executable" - }, "package_name": { "description": "Packages with versions to install if the associated service is required for the cluster", "oneOf": [ @@ -39,6 +33,33 @@ } ] }, + "cache_versions": { + "type": "boolean", + "default": true, + "description": "Specifies whether to install exactly the same package versions from package_name section during the add_node procedure" + } + } + }, + "PackageAssociationsPropertyNames": { + "enum": ["package_name", "cache_versions"] + }, + "PackageAssociations": { + "type": "object", + "description": "Associations related to specific package", + "allOf": [{"$ref": "#/definitions/PackageAssociationsProperties"}], + "propertyNames": { + "$ref": "#/definitions/PackageAssociationsPropertyNames" + } + }, + "ServicePackageAssociations": { + "type": "object", + "description": "Associations related to specific package", + "allOf": [{"$ref": "#/definitions/PackageAssociationsProperties"}], + "properties": { + "executable_name": { + "type": "string", + "description": "Path to or name of the associated binary executable" + }, "service_name": { "type": "string", "description": "Name of systemd service" @@ -46,14 +67,14 @@ "config_location": { "type": "string", "description": "Path to the configuration file of the associated service" - }, - "cache_versions": { - "type": "boolean", - 
"default": true, - "description": "Specifies whether to install exactly the same package versions from package_name section during the add_node procedure" } }, - "additionalProperties": false + "propertyNames": { + "anyOf": [ + {"$ref": "#/definitions/PackageAssociationsPropertyNames"}, + {"enum": ["executable_name", "service_name", "config_location"]} + ] + } }, "OSFamilyAssociations": { "type": "object", @@ -65,15 +86,25 @@ }, "Associations": { "properties": { - "docker": {"$ref": "#/definitions/PackageAssociations"}, - "containerd": {"$ref": "#/definitions/PackageAssociations"}, - "haproxy": {"$ref": "#/definitions/PackageAssociations"}, - "keepalived": {"$ref": "#/definitions/PackageAssociations"}, - "audit": {"$ref": "#/definitions/PackageAssociations"} + "docker": {"$ref": "#/definitions/ServicePackageAssociations"}, + "containerd": {"$ref": "#/definitions/ServicePackageAssociations"}, + "haproxy": {"$ref": "#/definitions/ServicePackageAssociations"}, + "keepalived": {"$ref": "#/definitions/ServicePackageAssociations"}, + "audit": {"$ref": "#/definitions/ServicePackageAssociations"}, + "conntrack": {"$ref": "#/definitions/PackageAssociations"}, + "iptables": {"$ref": "#/definitions/PackageAssociations"}, + "openssl": {"$ref": "#/definitions/PackageAssociations"}, + "curl": {"$ref": "#/definitions/PackageAssociations"}, + "unzip": {"$ref": "#/definitions/PackageAssociations"}, + "semanage": {"$ref": "#/definitions/PackageAssociations"}, + "kmod": {"$ref": "#/definitions/PackageAssociations"} } }, "AssociationsNames": { - "enum": ["docker", "containerd", "haproxy", "keepalived", "audit"] + "enum": [ + "docker", "containerd", "haproxy", "keepalived", "audit", + "conntrack", "iptables", "openssl", "curl", "unzip", "semanage", "kmod" + ] } } -} \ No newline at end of file +} diff --git a/kubemarine/resources/schemas/migrate_cri.json b/kubemarine/resources/schemas/migrate_cri.json index b15a6813f..b12badc38 100644 --- a/kubemarine/resources/schemas/migrate_cri.json +++ b/kubemarine/resources/schemas/migrate_cri.json @@ -27,7 +27,7 @@ "description": "Specify associations for containerd", "properties": { "containerd": { - "$ref": "definitions/services/packages/associations.json#/definitions/PackageAssociations" + "$ref": "definitions/services/packages/associations.json#/definitions/ServicePackageAssociations" } }, "additionalProperties": false diff --git a/kubemarine/resources/schemas/upgrade.json b/kubemarine/resources/schemas/upgrade.json index 18c69fb8a..fb609aaac 100644 --- a/kubemarine/resources/schemas/upgrade.json +++ b/kubemarine/resources/schemas/upgrade.json @@ -50,10 +50,11 @@ "associations": { "type": "object", "description": "Configure associations of package objects to be used during upgrade", - "allOf": [{"$ref": "definitions/services/packages/associations.json#/definitions/Associations"}], - "propertyNames": { - "$ref": "definitions/services/packages/associations.json#/definitions/AssociationsNames" - } + "properties": { + "docker": {"$ref": "definitions/services/packages/associations.json#/definitions/ServicePackageAssociations"}, + "containerd": {"$ref": "definitions/services/packages/associations.json#/definitions/ServicePackageAssociations"} + }, + "additionalProperties": false } }, "propertyNames": { diff --git a/kubemarine/system.py b/kubemarine/system.py index cb39243a3..4e47a2b49 100644 --- a/kubemarine/system.py +++ b/kubemarine/system.py @@ -155,7 +155,7 @@ def get_system_packages_for_upgrade(cluster): def get_system_packages(cluster): - return ["haproxy", 
"keepalived", cluster.inventory['services']['cri']['containerRuntime']] + return [cluster.inventory['services']['cri']['containerRuntime']] def fetch_os_versions(cluster: KubernetesCluster): diff --git a/kubemarine/thirdparties.py b/kubemarine/thirdparties.py index 0378b0994..5734a5b7d 100644 --- a/kubemarine/thirdparties.py +++ b/kubemarine/thirdparties.py @@ -16,6 +16,7 @@ from kubemarine.core import utils, static from kubemarine.core.cluster import KubernetesCluster +from kubemarine.core.group import NodeGroupResult, NodeGroup def enrich_inventory_apply_upgrade_defaults(inventory, cluster): @@ -117,14 +118,40 @@ def enrich_inventory_apply_defaults(inventory, cluster): return inventory -def install_thirdparty(cluster: KubernetesCluster, destination, config=None): +def get_install_group(cluster: KubernetesCluster, config: dict): + return cluster.create_group_from_groups_nodes_names( + config.get('groups', []), config.get('nodes', [])) + + +def get_group_require_unzip(cluster: KubernetesCluster, inventory: dict) -> NodeGroup: + thirdparties: dict = inventory['services']['thirdparties'] + + group = cluster.make_group([]) + for destination, config in thirdparties.items(): + extension = destination.split('.')[-1] + if config.get('unpack') is None or extension != 'zip': + continue + + install_group = get_install_group(cluster, config) + group = group.include_group(install_group) + + return group - if config is None: - config = cluster.inventory['services'].get('thirdparties', {}).get(destination) + +def install_thirdparty(filter_group: NodeGroup, destination: str) -> NodeGroupResult or None: + cluster = filter_group.cluster + config = cluster.inventory['services'].get('thirdparties', {}).get(destination) if config is None: raise Exception('Not possible to install thirdparty %s - not found in configfile' % destination) + common_group = get_install_group(cluster, config) + common_group = common_group.intersection_group(filter_group) + + if common_group.is_empty(): + cluster.log.verbose(f'No destination nodes to install thirdparty {destination!r}') + return + cluster.log.debug("Thirdparty \"%s\" will be installed" % destination) is_curl = config['source'][:4] == 'http' and '://' in config['source'][4:8] @@ -135,11 +162,6 @@ def install_thirdparty(cluster: KubernetesCluster, destination, config=None): destination_directory = '/'.join(destination.split('/')[:-1]) cluster.log.verbose('Destination directory: %s' % destination_directory) - common_group = cluster.create_group_from_groups_nodes_names(config.get('groups', []), config.get('nodes', [])) - - if cluster.context['initial_procedure'] == 'add_node': - common_group = common_group.get_new_nodes() - # ! In the further code there is no error and nothing is missing ! 
 # Here a long shell command is intentionally constructed and executed at once to speed up work
 # At the same time, in the middle of the construction of the command, a file may suddenly be uploaded and then
@@ -179,11 +201,13 @@ def install_thirdparty(cluster: KubernetesCluster, destination, config=None):
     extension = destination.split('.')[-1]
     if extension == 'zip':
         cluster.log.verbose('Unzip will be used for unpacking')
+        # TODO re-installation waits forever
         remote_commands += ' && sudo unzip %s -d %s' % (destination, config['unpack'])
     else:
         cluster.log.verbose('Tar will be used for unpacking')
         remote_commands += ' && sudo tar -zxf %s -C %s' % (destination, config['unpack'])
 
+    # TODO access rights do not work for zip
     remote_commands += ' && sudo tar -tf %s | xargs -I FILE sudo chmod %s %s/FILE' \
                        % (destination, config['mode'], config['unpack'])
     remote_commands += ' && sudo tar -tf %s | xargs -I FILE sudo chown %s %s/FILE' \
                        % (destination, config['mode'], config['unpack'])
@@ -197,15 +221,15 @@ def install_all_thirparties(group):
     cluster = group.cluster
     log = cluster.log
 
-    if not group.cluster.inventory['services'].get('thirdparties', {}):
-        return
-
-    for destination, config in group.cluster.inventory['services']['thirdparties'].items():
+    if not cluster.inventory['services'].get('thirdparties', {}):
+        return
+
+    for destination in cluster.inventory['services']['thirdparties'].keys():
         skip_thirdparty = False
 
         if cluster.context.get("initial_procedure") != "add_node":
             # TODO: speed up algorithm via else/continue/break
-            for plugin_name, plugin_configs in group.cluster.inventory['plugins'].items():
+            for plugin_name, plugin_configs in cluster.inventory['plugins'].items():
                 for plugin_procedure in plugin_configs['installation']['procedures']:
                     if plugin_procedure.get('thirdparty') == destination:
                         log.verbose('Thirdparty \'%s\' should be installed with \'%s\' plugin'
@@ -215,5 +239,6 @@
         if skip_thirdparty:
             log.verbose('Thirdparty %s installation delayed' % destination)
         else:
-            res = install_thirdparty(group.cluster, destination, config)
-            log.debug(res)
+            res = install_thirdparty(group, destination)
+            if res is not None:
+                log.debug(res)
diff --git a/kubemarine/yum.py b/kubemarine/yum.py
index 2382dbabb..5a8349ec9 100644
--- a/kubemarine/yum.py
+++ b/kubemarine/yum.py
@@ -56,10 +56,7 @@ def clean(group, mode="all", **kwargs):
     return group.sudo("yum clean %s" % mode, **kwargs)
 
 
-def install(group, include=None, exclude=None, **kwargs):
-    if include is None:
-        raise Exception('You must specify included packages to install')
-
+def get_install_cmd(include: str or list, exclude=None) -> str:
     if isinstance(include, list):
         include = ' '.join(include)
     command = 'yum install -y %s' % include
@@ -71,9 +68,17 @@ def install(group, include=None, exclude=None, **kwargs):
     command += f"; rpm -q {include}; if [ $? != 0 ]; then echo \"Failed to check version for some packages. " \
                f"Make sure packages are not already installed with higher versions. " \
                f"Also, make sure user-defined packages have rpm-compatible names. 
\"; exit 1; fi " - install_result = group.sudo(command, **kwargs) - return install_result + return command + + +def install(group, include=None, exclude=None, **kwargs): + if include is None: + raise Exception('You must specify included packages to install') + + command = get_install_cmd(include, exclude) + + return group.sudo(command, **kwargs) def remove(group, include=None, exclude=None, **kwargs): diff --git a/test/unit/core/test_executor.py b/test/unit/core/test_executor.py index dec23791f..a010a259b 100644 --- a/test/unit/core/test_executor.py +++ b/test/unit/core/test_executor.py @@ -70,6 +70,56 @@ def test_get_merged_results_all_excepted(self): for cxn, result in exe.get_merged_nodegroup_results().items(): self.assertIsInstance(result, TimeoutError) + def test_get_merged_results_multiple_commands(self): + results = demo.create_nodegroup_result(self.cluster.nodes["all"], stdout="foo\n") + self.cluster.fake_shell.add(results, "run", ["echo \"foo\""]) + results = demo.create_nodegroup_result(self.cluster.nodes["all"], stdout="bar\n") + self.cluster.fake_shell.add(results, "run", ["echo \"bar\""]) + with RemoteExecutor(self.cluster) as exe: + for host in self.cluster.nodes["all"].get_hosts(): + node = self.cluster.make_group([host]) + node.run("echo \"foo\"") + node.run("echo \"bar\"") + + exe.flush() + + for cxn, result in exe.get_merged_nodegroup_results().items(): + self.assertEqual("foo\nbar\n", result.stdout) + + def test_get_merged_results_filter_last_command_result(self): + results = demo.create_nodegroup_result(self.cluster.nodes["all"], stdout="foo\n") + self.cluster.fake_shell.add(results, "run", ["echo \"foo\""]) + results = demo.create_nodegroup_result(self.cluster.nodes["all"], stdout="bar\n") + self.cluster.fake_shell.add(results, "run", ["echo \"bar\""]) + tokens = [] + with RemoteExecutor(self.cluster) as exe: + for host in self.cluster.nodes["all"].get_hosts(): + node = self.cluster.make_group([host]) + node.run("echo \"foo\"") + tokens.append(node.run("echo \"bar\"")) + + exe.flush() + + for cxn, result in exe.get_merged_nodegroup_results(tokens).items(): + self.assertEqual("bar\n", result.stdout) + + def test_get_merged_results_filter_last_command_result_first_excepted(self): + results = demo.create_nodegroup_result(self.cluster.nodes["all"], code=1) + self.cluster.fake_shell.add(results, "run", ["false"]) + results = demo.create_nodegroup_result(self.cluster.nodes["all"], stdout="bar\n") + self.cluster.fake_shell.add(results, "run", ["echo \"bar\""]) + tokens = [] + with RemoteExecutor(self.cluster) as exe: + for host in self.cluster.nodes["all"].get_hosts(): + node = self.cluster.make_group([host]) + node.run("false") + tokens.append(node.run("echo \"bar\"")) + + exe.flush() + + for cxn, result in exe.get_merged_nodegroup_results(tokens).items(): + self.assertIsInstance(result, UnexpectedExit) + def test_not_throw_on_failed_all_warn(self): results = demo.create_nodegroup_result(self.cluster.nodes["all"], code=1) self.cluster.fake_shell.add(results, "run", ["false"]) diff --git a/test/unit/test_audit.py b/test/unit/test_audit.py index 97ad6ad97..16a2576b5 100644 --- a/test/unit/test_audit.py +++ b/test/unit/test_audit.py @@ -18,7 +18,7 @@ import fabric -from kubemarine import demo, audit +from kubemarine import demo, audit, packages, apt, yum from kubemarine.core.group import NodeGroupResult from kubemarine.demo import FakeKubernetesCluster @@ -34,10 +34,34 @@ def new_debian_cluster(self) -> FakeKubernetesCluster: context['nodes'] = 
demo.generate_nodes_context(self.inventory, os_name='ubuntu', os_version='20.04') return demo.new_cluster(self.inventory, context=context) + def get_detect_package_version_cmd(self, os_family: str, package_name: str): + return packages.get_detect_package_version_cmd(os_family, package_name) + def test_audit_installation_for_centos(self): context = demo.create_silent_context() context['nodes'] = demo.generate_nodes_context(self.inventory, os_name='centos', os_version='7.9') cluster = demo.new_cluster(self.inventory, context=context) + + package_associations = cluster.inventory['services']['packages']['associations']['rhel']['audit'] + + package_name = package_associations['package_name'] + service_name = package_associations['service_name'] + + # simulate package detection command + exp_results1 = demo.create_nodegroup_result(cluster.nodes['master'], code=0, + stderr='package %s is not installed' % package_name) + cluster.fake_shell.add(exp_results1, 'sudo', [self.get_detect_package_version_cmd('rhel', package_name)]) + + # simulate package installation command + installation_command = [yum.get_install_cmd(package_name)] + exp_results2 = demo.create_nodegroup_result(cluster.nodes['master'], + code=0, stdout='Successfully installed audit') + cluster.fake_shell.add(exp_results2, 'sudo', installation_command) + + # simulate enable package command + exp_results3 = demo.create_nodegroup_result(cluster.nodes['master'], stdout='ok') + cluster.fake_shell.add(exp_results3, 'sudo', ['systemctl enable %s --now' % service_name]) + audit.install(cluster.nodes['master']) def test_audit_installation_for_debian(self): @@ -50,12 +74,10 @@ def test_audit_installation_for_debian(self): # simulate package detection command exp_results1 = demo.create_nodegroup_result(cluster.nodes['master'], code=0, stderr='dpkg-query: no packages found matching %s' % package_name) - cluster.fake_shell.add(exp_results1, 'sudo', ['dpkg-query -f \'${Package}=${Version}\\n\' -W %s || true' - % package_name]) + cluster.fake_shell.add(exp_results1, 'sudo', [self.get_detect_package_version_cmd('debian', package_name)]) # simulate package installation command - installation_command = ['DEBIAN_FRONTEND=noninteractive apt update && ' - 'DEBIAN_FRONTEND=noninteractive sudo apt install -y %s' % package_name] + installation_command = [apt.get_install_cmd(package_name)] exp_results2 = demo.create_nodegroup_result(cluster.nodes['master'], code=0, stdout='Successfully installed audit') cluster.fake_shell.add(exp_results2, 'sudo', installation_command) @@ -74,9 +96,8 @@ def test_audit_installation_when_already_installed_for_debian(self): package_name = package_associations['package_name'] # simulate package detection command - exp_results = demo.create_nodegroup_result(cluster.nodes['master'], code=0, stdout='%s=' % package_name) - cluster.fake_shell.add(exp_results, 'sudo', ['dpkg-query -f \'${Package}=${Version}\\n\' -W %s || true' - % package_name]) + exp_results = demo.create_nodegroup_result(cluster.nodes['master'], code=0, stdout='%s=1:2.8.5-2ubuntu6' % package_name) + cluster.fake_shell.add(exp_results, 'sudo', [self.get_detect_package_version_cmd('debian', package_name)]) # run task audit.install(cluster.nodes['master']) @@ -91,23 +112,21 @@ def test_audit_installation_when_partly_installed_for_debian(self): # simulate package detection command with partly installed audit host_to_result = { - '10.101.1.2': fabric.runners.Result(stdout='%s=' % package_name, + '10.101.1.2': fabric.runners.Result(stdout='%s=1:2.8.5-2ubuntu6' % 
package_name,
                                                 exited=0, connection=all_nodes_group['10.101.1.2']),
             '10.101.1.3': fabric.runners.Result(stderr='dpkg-query: no packages found matching %s' % package_name,
                                                 exited=0, connection=all_nodes_group['10.101.1.3']),
-            '10.101.1.4': fabric.runners.Result(stdout='%s=' % package_name,
+            '10.101.1.4': fabric.runners.Result(stdout='%s=1:2.8.5-2ubuntu6' % package_name,
                                                 exited=0, connection=all_nodes_group['10.101.1.4'])
         }
         exp_results1 = NodeGroupResult(cluster, host_to_result)
-        cluster.fake_shell.add(exp_results1, 'sudo', ['dpkg-query -f \'${Package}=${Version}\\n\' -W %s || true'
-                                                      % package_name])
+        cluster.fake_shell.add(exp_results1, 'sudo', [self.get_detect_package_version_cmd('debian', package_name)])
 
         # simulate package installation command
-        installation_command = ['DEBIAN_FRONTEND=noninteractive apt update && '
-                                'DEBIAN_FRONTEND=noninteractive sudo apt install -y %s' % package_name]
+        installation_command = [apt.get_install_cmd(package_name)]
         exp_results2 = demo.create_nodegroup_result(cluster.nodes['master'],
                                                     code=0, stdout='Successfully installed audit')
         cluster.fake_shell.add(exp_results2, 'sudo', installation_command)
@@ -136,7 +155,7 @@ def test_audit_configuring(self):
         expected_results = demo.create_nodegroup_result(cluster.nodes['master'], stdout='restarted', code=0)
         cluster.fake_shell.add(expected_results, 'sudo', ['service %s restart' % package_name])
 
-        actual_results = audit.apply_audit_rules(cluster.nodes['master'], now=True)
+        actual_results = audit.apply_audit_rules(cluster.nodes['master'])
         self.assertEqual(expected_results, actual_results,
                          msg='Configuration task did not finish with restart result')
diff --git a/test/unit/test_haproxy.py b/test/unit/test_haproxy.py
index 18ad95a5f..dbe3ed5fe 100755
--- a/test/unit/test_haproxy.py
+++ b/test/unit/test_haproxy.py
@@ -16,7 +16,7 @@
 
 import unittest
 
-from kubemarine import haproxy
+from kubemarine import haproxy, yum
 from kubemarine import demo
 
 
@@ -107,11 +107,7 @@ def test_haproxy_installation_when_not_installed(self):
         cluster.fake_shell.add(missing_package_result, 'sudo', missing_package_command)
 
         # simulate package installation
-        installation_command = ['yum install -y %s; rpm -q %s; if [ $? != 0 ]; then echo '
-                                '\"Failed to check version for some packages. '
-                                'Make sure packages are not already installed with higher versions. '
-                                'Also, make sure user-defined packages have rpm-compatible names. \"; exit 1; fi '
-                                % (package_associations['package_name'], package_associations['package_name'])]
+        installation_command = [yum.get_install_cmd(package_associations['package_name'])]
         expected_results = demo.create_nodegroup_result(cluster.nodes['balancer'], code=0,
                                                         stdout='Successfully installed haproxy')
         cluster.fake_shell.add(expected_results, 'sudo', installation_command)
@@ -148,7 +144,7 @@ def get_result_str(results):
     for conn, result in results.items():
         if output != "":
             output += "\n"
-        output += "\t%s (%s): code=%i" % (conn, 0, result.exited)
+        output += "\t%s (%s): code=%i" % (conn.host, 0, result.exited)
         if result.stdout:
             output += "\n\t\tSTDOUT: %s" % result.stdout.replace("\n", "\n\t\t ")
         if result.stderr:
diff --git a/test/unit/test_install.py b/test/unit/test_install.py
new file mode 100644
index 000000000..af784b8c6
--- /dev/null
+++ b/test/unit/test_install.py
@@ -0,0 +1,104 @@
+# Copyright 2021-2022 NetCracker Technology Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from kubemarine import demo, packages +from kubemarine.core import static +from kubemarine.procedures import install + + +class ManageMandatoryPackages(unittest.TestCase): + def setUp(self) -> None: + self.inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) + self.context = demo.create_silent_context() + self.context['nodes'] = demo.generate_nodes_context(self.inventory, os_name='ubuntu', os_version='20.04') + self.mandatory_pkgs_setup = {} + for package in static.DEFAULTS["services"]["packages"]['mandatory'].keys(): + self.mandatory_pkgs_setup[package] = [] + for node in self.inventory['nodes']: + host = node['address'] + for pkg in ('conntrack', 'iptables'): + if 'master' in node['roles'] or 'worker' in node['roles']: + self.mandatory_pkgs_setup[pkg].append(host) + for pkg in ('openssl', 'curl', 'kmod'): + self.mandatory_pkgs_setup[pkg].append(host) + + def _new_cluster(self): + cluster = demo.new_cluster(self.inventory, context=self.context) + for node in cluster.nodes['all'].get_ordered_members_list(): + installation_command = self._get_install_cmd(cluster, node.get_host()) + results = demo.create_hosts_result([node.get_host()], stdout=f'Successfully installed') + cluster.fake_shell.add(results, 'sudo', installation_command) + + return cluster + + def _get_install_cmd(self, cluster: demo.FakeKubernetesCluster, host: str): + os_family = cluster.get_os_family() + package_names = [] + for pkg, hosts in self.mandatory_pkgs_setup.items(): + if host in hosts: + package_names.append( + cluster.inventory['services']['packages']['associations'][os_family][pkg]['package_name']) + + return [packages.get_package_manager(cluster.nodes['all']).get_install_cmd(package_names)] + + def _assert_installed(self, cluster: demo.FakeKubernetesCluster): + for node in cluster.nodes['all'].get_ordered_members_list(): + installation_command = self._get_install_cmd(cluster, node.get_host()) + history = cluster.fake_shell.history_find(node.get_host(), 'sudo', installation_command) + self.assertTrue(len(history) == 1 and history[0]["used_times"] == 1, + "Installation command should be called once") + + def test_default_install_debian(self): + cluster = self._new_cluster() + install.system_prepare_package_manager_manage_packages(cluster) + self._assert_installed(cluster) + + def test_default_install_rhel(self): + self.context['nodes'] = demo.generate_nodes_context(self.inventory) + for node in self.inventory['nodes']: + self.mandatory_pkgs_setup.setdefault('semanage', []).append(node['address']) + cluster = self._new_cluster() + install.system_prepare_package_manager_manage_packages(cluster) + self._assert_installed(cluster) + + def test_skip_not_managed(self): + del self.mandatory_pkgs_setup['conntrack'] + del self.mandatory_pkgs_setup['openssl'] + mandatory_section = self.inventory.setdefault('services', {}).setdefault('packages', {}).setdefault('mandatory', {}) + mandatory_section['conntrack'] = False + mandatory_section['openssl'] = False + cluster = self._new_cluster() + install.system_prepare_package_manager_manage_packages(cluster) + self._assert_installed(cluster) + + 
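+    # 'unzip' has empty default groups in globals.yaml, so it is expected only on nodes
+    # that unpack .zip thirdparties, which the following test checks.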
def test_install_unzip(self): + thirdparties = self.inventory.setdefault('services', {}).setdefault('thirdparties', {}) + nodes = ['balancer-1', 'master-2'] + thirdparties['target.zip'] = { + "source": "source.zip", + "unpack": "target/dir", + "nodes": nodes + } + for node in self.inventory['nodes']: + if node['name'] in nodes: + self.mandatory_pkgs_setup.setdefault('unzip', []).append(node['address']) + cluster = self._new_cluster() + install.system_prepare_package_manager_manage_packages(cluster) + self._assert_installed(cluster) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/test_keepalived.py b/test/unit/test_keepalived.py index f85dba54c..e3bc845d0 100755 --- a/test/unit/test_keepalived.py +++ b/test/unit/test_keepalived.py @@ -16,7 +16,7 @@ import unittest -from kubemarine import demo, keepalived +from kubemarine import demo, keepalived, yum class TestKeepalivedDefaultsEnrichment(unittest.TestCase): @@ -183,11 +183,7 @@ def test_keepalived_installation_when_not_installed(self): cluster.fake_shell.add(missing_package_result, 'sudo', missing_package_command) # simulate package installation - installation_command = ['yum install -y %s; rpm -q %s; if [ $? != 0 ]; then echo ' - '\"Failed to check version for some packages. ' - 'Make sure packages are not already installed with higher versions. ' - 'Also, make sure user-defined packages have rpm-compatible names. \"; exit 1; fi ' - % (package_associations['package_name'], package_associations['package_name'])] + installation_command = [yum.get_install_cmd(package_associations['package_name'])] expected_results = demo.create_nodegroup_result(cluster.nodes['keepalived'], code=0, stdout='Successfully installed keepalived') cluster.fake_shell.add(expected_results, 'sudo', installation_command) diff --git a/test/unit/test_packages.py b/test/unit/test_packages.py index d998fca5c..4133198ca 100644 --- a/test/unit/test_packages.py +++ b/test/unit/test_packages.py @@ -18,6 +18,7 @@ from kubemarine import demo, packages from kubemarine.core import static, defaults, log, errors +from kubemarine.core.yaml_merger import default_merger from kubemarine.demo import FakeKubernetesCluster from kubemarine.procedures import add_node from test.unit import utils @@ -37,7 +38,18 @@ def prepare_compiled_associations_defaults() -> dict: root = deepcopy(defs) root['globals'] = static.GLOBALS - return defaults.compile_object(logger, defs['services']['packages']['associations'], root) + compiled_defaults = defaults.compile_object(logger, defs['services']['packages']['associations'], root) + + for association_name in packages.get_associations_os_family_keys(): + os_associations: dict = deepcopy(static.GLOBALS['packages']['common_associations']) + if association_name == 'debian': + del os_associations['semanage'] + for association_params in os_associations.values(): + del association_params['groups'] + default_merger.merge(os_associations, compiled_defaults[association_name]) + compiled_defaults[association_name] = os_associations + + return compiled_defaults COMPILED_ASSOCIATIONS_DEFAULTS = prepare_compiled_associations_defaults() @@ -71,6 +83,11 @@ def set_cache_versions_false(inventory: dict, os_family: Optional[str], package: section['cache_versions'] = False +def set_mandatory_off(inventory: dict, package: str): + inventory.setdefault('services', {}).setdefault('packages', {})\ + .setdefault('mandatory', {})[package] = False + + def get_package_name(os_family, package) -> str: return packages.get_package_name(os_family, package) @@ -218,6 
+235,12 @@ def test_remove_unused_os_family_associations(self): finalized_inventory = packages.remove_unused_os_family_associations(cluster, cluster.inventory) self.assertEqual({'debian'}, global_associations(finalized_inventory).keys()) + def test_semanage_debian_not_managed(self): + inventory = demo.generate_inventory(**demo.MINIHA_KEEPALIVED) + package_associations(inventory, None, 'semanage')['package_name'] = 'policycoreutils-python-utils' + with self.assertRaisesRegex(Exception, packages.ERROR_SEMANAGE_NOT_MANAGED_DEBIAN): + new_debian_cluster(inventory) + def _nodes_context_one_different_os(self, inventory, host_different_os): nodes_context = demo.generate_nodes_context(inventory, os_name='ubuntu', os_version='20.04') nodes_context[host_different_os]['os'] = { @@ -253,7 +276,8 @@ def test_detect_versions_debian(self): results = demo.create_nodegroup_result(group, stdout=expected_pkg) cluster.fake_shell.add(results, 'sudo', [packages.get_detect_package_version_cmd('debian', 'containerd')]) - detected_packages = packages.detect_installed_packages_version_groups(group, queried_pkg) + hosts_to_packages = {host: queried_pkg for host in group.get_hosts()} + detected_packages = packages.detect_installed_packages_version_hosts(cluster, hosts_to_packages) self.assertEqual({queried_pkg}, detected_packages.keys(), "Incorrect initially queries package") @@ -275,7 +299,8 @@ def test_detect_versions_rhel(self): results = demo.create_nodegroup_result(group, stdout=expected_pkg) cluster.fake_shell.add(results, 'sudo', [packages.get_detect_package_version_cmd('rhel', 'docker-ce')]) - detected_packages = packages.detect_installed_packages_version_groups(group, [queried_pkg]) + hosts_to_packages = {host: [queried_pkg] for host in group.get_hosts()} + detected_packages = packages.detect_installed_packages_version_hosts(cluster, hosts_to_packages) self.assertEqual({queried_pkg}, detected_packages.keys(), "Incorrect initially queries package") @@ -288,12 +313,13 @@ def test_detect_versions_rhel(self): class CacheVersions(unittest.TestCase): def setUp(self) -> None: - self.inventory = demo.generate_inventory(**demo.MINIHA_KEEPALIVED) + self.inventory = demo.generate_inventory(**demo.FULLHA_KEEPALIVED) self.context = demo.create_silent_context(procedure='add_node') self.context['nodes'] = demo.generate_nodes_context(self.inventory, os_name='ubuntu', os_version='20.04') self.hosts = [node['address'] for node in self.inventory['nodes']] - self.new_host = self.inventory['nodes'][0]['address'] - self.procedure_inventory = {'nodes': [self.inventory['nodes'].pop(0)]} + first_master_idx = next(i for i, node in enumerate(self.inventory['nodes']) if 'master' in node['roles']) + self.new_host = self.inventory['nodes'][first_master_idx]['address'] + self.procedure_inventory = {'nodes': [self.inventory['nodes'].pop(first_master_idx)]} self.initial_hosts = [node['address'] for node in self.inventory['nodes']] def _new_cluster(self): @@ -307,15 +333,16 @@ def _packages_include(self, inventory: dict): .setdefault('include', []) def test_cache_versions_and_finalize_inventory(self): - self._packages_install(self.inventory).extend(['curl', 'unzip']) + self._packages_install(self.inventory).extend(['curl2', 'unzip2']) cluster = self._new_cluster() utils.stub_associations_packages(cluster, { 'containerd': {host: 'containerd=1.5.9-0ubuntu1~20.04.4' for host in self.initial_hosts}, 'auditd': {host: 'auditd=1:2.8.5-2ubuntu6' for host in self.initial_hosts}, + 'kmod': {host: 'kmod=27-1ubuntu2.1' for host in self.initial_hosts} }) 
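+        # the custom packages are named curl2/unzip2 so that they do not overlap with
+        # the curl/unzip packages that are now managed as mandatory associations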
utils.stub_detect_packages(cluster, { - 'curl': {host: 'curl=7.68.0-1ubuntu2.14' for host in self.hosts}, - 'unzip': {host: 'unzip=6.0-25ubuntu1.1' for host in self.hosts}, + 'curl2': {host: 'curl2=7.68.0-1ubuntu2.14' for host in self.hosts}, + 'unzip2': {host: 'unzip2=6.0-25ubuntu1.1' for host in self.hosts}, }) cache_installed_packages(cluster) @@ -326,7 +353,10 @@ def test_cache_versions_and_finalize_inventory(self): self.assertEqual('auditd=1:2.8.5-2ubuntu6', package_associations(cluster.inventory, 'debian', 'audit')['package_name'], "auditd was not detected") - self.assertEqual({'curl', 'unzip'}, set(self._packages_include(cluster.inventory)), + self.assertEqual('kmod=27-1ubuntu2.1', + package_associations(cluster.inventory, 'debian', 'kmod')['package_name'], + "kmod was not detected") + self.assertEqual({'curl2', 'unzip2'}, set(self._packages_include(cluster.inventory)), "Custom packages versions should be not detected when adding node") finalized_inventory = utils.make_finalized_inventory(cluster) @@ -336,18 +366,25 @@ def test_cache_versions_and_finalize_inventory(self): self.assertEqual('auditd=1:2.8.5-2ubuntu6', package_associations(finalized_inventory, 'debian', 'audit')['package_name'], "auditd was not detected") - self.assertEqual({'curl=7.68.0-1ubuntu2.14', 'unzip=6.0-25ubuntu1.1'}, set(self._packages_include(finalized_inventory)), + self.assertEqual('kmod=27-1ubuntu2.1', + package_associations(finalized_inventory, 'debian', 'kmod')['package_name'], + "kmod was not detected") + self.assertEqual({'curl2=7.68.0-1ubuntu2.14', 'unzip2=6.0-25ubuntu1.1'}, set(self._packages_include(finalized_inventory)), "Custom packages versions should be detected in finalized inventory") def test_cache_versions_global_off(self): expected_containerd = 'containerd=1.5.9-0ubuntu1~20.04.4' + expected_kmod = 'kmod=27-1ubuntu2.1' default_containerd = get_compiled_defaults()['debian']['containerd']['package_name'][0] + default_kmod = get_compiled_defaults()['debian']['kmod']['package_name'] self.assertNotEqual(expected_containerd, default_containerd) + self.assertNotEqual(expected_kmod, default_kmod) set_cache_versions_false(self.inventory, None, None) cluster = self._new_cluster() utils.stub_associations_packages(cluster, { 'containerd': {host: expected_containerd for host in self.initial_hosts}, + 'kmod': {host: expected_kmod for host in self.initial_hosts}, }) cache_installed_packages(cluster) @@ -355,23 +392,32 @@ def test_cache_versions_global_off(self): self.assertEqual(default_containerd, package_associations(cluster.inventory, 'debian', 'containerd')['package_name'][0], "containerd should be default because caching versions is off") + self.assertEqual(default_kmod, + package_associations(cluster.inventory, 'debian', 'kmod')['package_name'], + "kmod should be default because caching versions is off") finalized_inventory = utils.make_finalized_inventory(cluster) self.assertEqual(expected_containerd, package_associations(finalized_inventory, 'debian', 'containerd')['package_name'][0], "containerd was not detected") + self.assertEqual(expected_kmod, + package_associations(finalized_inventory, 'debian', 'kmod')['package_name'], + "kmod was not detected") def test_cache_versions_specific_off(self): default_containerd = get_compiled_defaults()['debian']['containerd']['package_name'][0] default_haproxy = get_compiled_defaults()['debian']['haproxy']['package_name'] + default_curl = get_compiled_defaults()['debian']['curl']['package_name'] set_cache_versions_false(self.inventory, None, 'containerd') 
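         # cache_versions can be turned off either in the common associations section (os_family=None)
         # or in an OS-family-specific section ('debian' here)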
         set_cache_versions_false(self.inventory, 'debian', 'haproxy')
+        set_cache_versions_false(self.inventory, 'debian', 'curl')
         cluster = self._new_cluster()
         utils.stub_associations_packages(cluster, {
             'containerd': {host: 'containerd=1.5.9-0ubuntu1~20.04.4' for host in self.initial_hosts},
             'auditd': {host: 'auditd=1:2.8.5-2ubuntu6' for host in self.initial_hosts},
             'haproxy': {host: 'haproxy=2.0.29-0ubuntu1' for host in self.initial_hosts},
+            'curl': {host: 'curl=7.68.0-1ubuntu2.14' for host in self.initial_hosts},
         })

         cache_installed_packages(cluster)
@@ -385,6 +431,9 @@ def test_cache_versions_specific_off(self):
         self.assertEqual(default_haproxy,
                          package_associations(cluster.inventory, 'debian', 'haproxy')['package_name'],
                          "haproxy should be default because caching versions is off")
+        self.assertEqual(default_curl,
+                         package_associations(cluster.inventory, 'debian', 'curl')['package_name'],
+                         "curl should be default because caching versions is off")

         finalized_inventory = utils.make_finalized_inventory(cluster)
         self.assertEqual('containerd=1.5.9-0ubuntu1~20.04.4',
@@ -396,39 +445,155 @@ def test_cache_versions_specific_off(self):
         self.assertEqual('haproxy=2.0.29-0ubuntu1',
                          package_associations(finalized_inventory, 'debian', 'haproxy')['package_name'],
                          "haproxy was not detected")
+        self.assertEqual('curl=7.68.0-1ubuntu2.14',
+                         package_associations(finalized_inventory, 'debian', 'curl')['package_name'],
+                         "curl was not detected")

-    def test_add_node_fails_different_package_versions(self):
+    def test_skip_cache_versions_not_managed(self):
+        default_kmod = get_compiled_defaults()['debian']['kmod']['package_name']
+        default_docker = get_compiled_defaults()['debian']['docker']['package_name'][0]
+
+        set_mandatory_off(self.inventory, 'kmod')
         cluster = self._new_cluster()
         utils.stub_associations_packages(cluster, {
-            'containerd': {
-                self.initial_hosts[0]: 'containerd=1.5.9-0ubuntu1~20.04.4',
-                self.initial_hosts[1]: 'containerd=2',
-            },
+            'kmod': {host: 'kmod=27-1ubuntu2.1' for host in self.initial_hosts},
+            'curl': {host: 'curl=7.68.0-1ubuntu2.14' for host in self.initial_hosts},
+            'docker-ce': {host: 'docker-ce=1' for host in self.initial_hosts},
         })

+        cache_installed_packages(cluster)
+
+        self.assertEqual(default_kmod,
+                         package_associations(cluster.inventory, 'debian', 'kmod')['package_name'],
+                         "kmod should be default because automatic management is off")
+        self.assertEqual('curl=7.68.0-1ubuntu2.14',
+                         package_associations(cluster.inventory, 'debian', 'curl')['package_name'],
+                         "curl was not detected")
+        self.assertEqual(default_docker,
+                         package_associations(cluster.inventory, 'debian', 'docker')['package_name'][0],
+                         "docker should be default because the cluster is based on containerd")
+
+        finalized_inventory = utils.make_finalized_inventory(cluster)
+        self.assertEqual(default_kmod,
+                         package_associations(finalized_inventory, 'debian', 'kmod')['package_name'],
+                         "kmod should be default because automatic management is off")
+        self.assertEqual('curl=7.68.0-1ubuntu2.14',
+                         package_associations(finalized_inventory, 'debian', 'curl')['package_name'],
+                         "curl was not detected")
+        self.assertEqual(default_docker,
+                         package_associations(finalized_inventory, 'debian', 'docker')['package_name'][0],
+                         "docker should be default because the cluster is based on containerd")
+
+    def test_add_node_fails_different_package_versions(self):
+        cluster = self._new_cluster()
+        containerd_hosts_stub = {}
+        last_k8s_host = ''
+        for node in self.inventory['nodes']:
+            if 'master' in node['roles'] or 'worker' in node['roles']:
+                last_k8s_host = node['address']
+                containerd_hosts_stub[last_k8s_host] = 'containerd=1.5.9-0ubuntu1~20.04.4'
+        # the last Kubernetes node reports a diverging containerd version
+        containerd_hosts_stub[last_k8s_host] = 'containerd=2'
+        utils.stub_associations_packages(cluster, {'containerd': containerd_hosts_stub})
+
+        expected_error_regex = packages.ERROR_MULTIPLE_PACKAGE_VERSIONS_DETECTED.replace('%s', '.*')
+        with self.assertRaisesRegex(Exception, expected_error_regex):
+            cache_installed_packages(cluster)
+
+    def test_add_node_fails_different_mandatory_package_versions(self):
+        cluster = self._new_cluster()
+        conntrack_hosts_stub = {}
+        last_k8s_host = ''
+        for node in self.inventory['nodes']:
+            if 'master' in node['roles'] or 'worker' in node['roles']:
+                last_k8s_host = node['address']
+                conntrack_hosts_stub[last_k8s_host] = 'conntrack=1:1.4.5-2'
+        # the last Kubernetes node reports a diverging conntrack version
+        conntrack_hosts_stub[last_k8s_host] = 'conntrack=2'
+        utils.stub_associations_packages(cluster, {'conntrack': conntrack_hosts_stub})
+
         expected_error_regex = packages.ERROR_MULTIPLE_PACKAGE_VERSIONS_DETECTED.replace('%s', '.*')
         with self.assertRaisesRegex(Exception, expected_error_regex):
             cache_installed_packages(cluster)

+    def test_add_node_success_cache_by_initial_nodes(self):
+        cluster = self._new_cluster()
+        packages_hosts_stub = {'containerd': {}, 'conntrack': {}}
+        for node in self.inventory['nodes']:
+            host = node['address']
+            if 'master' in node['roles'] or 'worker' in node['roles']:
+                packages_hosts_stub['containerd'][host] = 'containerd=1.5.9-0ubuntu1~20.04.4'
+                packages_hosts_stub['conntrack'][host] = 'conntrack=1:1.4.5-2'
+
+        # versions on the node being added diverge, but should not affect caching
+        packages_hosts_stub['containerd'][self.new_host] = 'containerd=2'
+        packages_hosts_stub['conntrack'][self.new_host] = 'conntrack=2'
+        utils.stub_associations_packages(cluster, packages_hosts_stub)
+
+        cache_installed_packages(cluster)
+
+        self.assertEqual('containerd=1.5.9-0ubuntu1~20.04.4',
+                         package_associations(cluster.inventory, 'debian', 'containerd')['package_name'][0],
+                         "containerd should be detected by initial nodes")
+        self.assertEqual('conntrack=1:1.4.5-2',
+                         package_associations(cluster.inventory, 'debian', 'conntrack')['package_name'],
+                         "conntrack should be detected by initial nodes")
+
+    def test_add_node_success_cache_versions_managed_nodes(self):
+        cluster = self._new_cluster()
+        packages_hosts_stub = {'containerd': {}, 'conntrack': {}}
+        for node in self.inventory['nodes']:
+            host = node['address']
+            if 'master' in node['roles'] or 'worker' in node['roles']:
+                packages_hosts_stub['containerd'][host] = 'containerd=1.5.9-0ubuntu1~20.04.4'
+                packages_hosts_stub['conntrack'][host] = 'conntrack=1:1.4.5-2'
+            else:
+                # versions on nodes where the packages are not managed should be ignored
+                packages_hosts_stub['containerd'][host] = 'containerd=2'
+                packages_hosts_stub['conntrack'][host] = 'conntrack=2'
+
+        utils.stub_associations_packages(cluster, packages_hosts_stub)
+
+        cache_installed_packages(cluster)
+
+        self.assertEqual('containerd=1.5.9-0ubuntu1~20.04.4',
+                         package_associations(cluster.inventory, 'debian', 'containerd')['package_name'][0],
+                         "containerd was not detected")
+        self.assertEqual('conntrack=1:1.4.5-2',
+                         package_associations(cluster.inventory, 'debian', 'conntrack')['package_name'],
+                         "conntrack was not detected")
+
+        finalized_inventory = utils.make_finalized_inventory(cluster)
+        self.assertEqual('containerd=1.5.9-0ubuntu1~20.04.4',
+                         package_associations(finalized_inventory, 'debian', 'containerd')['package_name'][0],
+                         "containerd was not detected")
+        self.assertEqual('conntrack=1:1.4.5-2',
+                         package_associations(finalized_inventory, 'debian', 'conntrack')['package_name'],
+                         "conntrack was not detected")
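+
+    # Unlike caching during add_node, inventory finalization below does not fail
+    # on diverging package versions: associations fall back to their defaults,
+    # while custom packages keep the constraints from the user's inventory.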
     def test_finalize_inventory_different_package_versions(self):
         default_containerd = get_compiled_defaults()['debian']['containerd']['package_name'][0]
+        default_kmod = get_compiled_defaults()['debian']['kmod']['package_name']

-        self._packages_install(self.inventory).extend(['curl=7.*', 'unzip=6.*'])
+        self._packages_install(self.inventory).extend(['curl2=7.*', 'unzip2=6.*'])
         cluster = self._new_cluster()
-        utils.stub_associations_packages(cluster, {
-            'containerd': {
-                self.initial_hosts[0]: 'containerd=1.5.9-0ubuntu1~20.04.4',
-                self.initial_hosts[1]: 'containerd=2',
-            },
-            'auditd': {host: 'auditd=1:2.8.5-2ubuntu6' for host in self.initial_hosts},
-        })
+        packages_hosts_stub = {
+            'containerd': {}, 'auditd': {}, 'kmod': {}
+        }
+        last_host = ''
+        for last_host in self.initial_hosts:
+            packages_hosts_stub['containerd'][last_host] = 'containerd=1.5.9-0ubuntu1~20.04.4'
+            packages_hosts_stub['auditd'][last_host] = 'auditd=1:2.8.5-2ubuntu6'
+            packages_hosts_stub['kmod'][last_host] = 'kmod=27-1ubuntu2.1'

+        # the last initial host reports diverging containerd and kmod versions
+        packages_hosts_stub['containerd'][last_host] = 'containerd=2'
+        packages_hosts_stub['kmod'][last_host] = 'kmod=28'
+        utils.stub_associations_packages(cluster, packages_hosts_stub)
+
+        custom_hosts_stub = {}
+        for last_host in self.initial_hosts:
+            custom_hosts_stub[last_host] = 'curl2=7.68.0-1ubuntu2.14'
+        # the last initial host also reports a diverging version of the custom package
+        custom_hosts_stub[last_host] = 'curl2=2'
         utils.stub_detect_packages(cluster, {
-            'curl': {
-                self.initial_hosts[0]: 'curl=7.68.0-1ubuntu2.14',
-                self.initial_hosts[1]: 'curl=2',
-            },
-            'unzip': {host: 'unzip=6.0-25ubuntu1.1' for host in self.hosts},
+            'curl2': custom_hosts_stub,
+            'unzip2': {host: 'unzip2=6.0-25ubuntu1.1' for host in self.hosts},
         })

         finalized_inventory = utils.make_finalized_inventory(cluster)
@@ -438,21 +603,26 @@ def test_finalize_inventory_different_package_versions(self):
         self.assertEqual('auditd=1:2.8.5-2ubuntu6',
                          package_associations(finalized_inventory, 'debian', 'audit')['package_name'],
                          "auditd was not detected")
-        self.assertEqual({'curl=7.*', 'unzip=6.0-25ubuntu1.1'}, set(self._packages_include(finalized_inventory)),
+        self.assertEqual(default_kmod,
+                         package_associations(finalized_inventory, 'debian', 'kmod')['package_name'],
+                         "kmod should be default because multiple versions are installed")
+        self.assertEqual({'curl2=7.*', 'unzip2=6.0-25ubuntu1.1'}, set(self._packages_include(finalized_inventory)),
                          "Custom package versions should be partially detected in the finalized inventory")

     def test_not_cache_versions_if_multiple_os_family_versions(self):
         default_containerd = get_compiled_defaults()['debian']['containerd']['package_name'][0]
+        default_kmod = get_compiled_defaults()['debian']['kmod']['package_name']
         self.context['nodes'][self.new_host]['os']['version'] = '22.04'
-        self._packages_install(self.inventory).extend(['curl=7.*'])
+        self._packages_install(self.inventory).extend(['curl2=7.*'])
         cluster = self._new_cluster()
         utils.stub_associations_packages(cluster, {
             'containerd': {host: 'containerd=1.5.9-0ubuntu1~20.04.4' for host in self.initial_hosts},
+            'kmod': {host: 'kmod=27-1ubuntu2.1' for host in self.initial_hosts},
         })
         utils.stub_detect_packages(cluster, {
-            'curl': {host: 'curl=7.68.0-1ubuntu2.14' for host in self.hosts},
+            'curl2': {host: 'curl2=7.68.0-1ubuntu2.14' for host in self.hosts},
         })

         cache_installed_packages(cluster)
@@ -460,12 +630,18 @@ def test_not_cache_versions_if_multiple_os_family_versions(self):
         self.assertEqual(default_containerd,
                          package_associations(cluster.inventory, 'debian', 'containerd')['package_name'][0],
                          "containerd should be default because multiple OS versions are detected")
+        self.assertEqual(default_kmod,
+                         package_associations(cluster.inventory, 'debian', 'kmod')['package_name'],
+                         "kmod should be default because multiple OS versions are detected")

         finalized_inventory = utils.make_finalized_inventory(cluster)
         self.assertEqual(default_containerd,
                          package_associations(finalized_inventory, 'debian', 'containerd')['package_name'][0],
                          "containerd should be default because multiple OS versions are detected")
-        self.assertEqual({'curl=7.*'}, set(self._packages_include(finalized_inventory)),
+        self.assertEqual(default_kmod,
+                         package_associations(finalized_inventory, 'debian', 'kmod')['package_name'],
+                         "kmod should be default because multiple OS versions are detected")
+        self.assertEqual({'curl2=7.*'}, set(self._packages_include(finalized_inventory)),
                          "Custom packages should be default because multiple OS versions are detected")