From bf3bec7bca9c3b6d52511cba2b2c47c2917a075e Mon Sep 17 00:00:00 2001
From: Guillaume Dore
Date: Mon, 5 Jul 2021 15:56:02 +0200
Subject: [PATCH] add option to use kubectl with sudo

Introduce a `use_sudo` variable; when enabled, every kubectl invocation
made by provisioners (drain, annotate, label, taint, readiness probes)
is prefixed with `sudo`. The resolved command is stored in each
resource's triggers so destroy-time provisioners can still reach it.

Note: the `until kubectl get node` readiness probe in agents_annotation
is also switched to the configurable command, matching agents_label,
agents_taint and servers_install — otherwise the probe loops forever
when the kubeconfig is only readable by root.
---
 agent_nodes.tf  | 25 +++++++++++++++----------
 server_nodes.tf | 20 ++++++++++++--------
 variables.tf    |  6 ++++++
 3 files changed, 33 insertions(+), 18 deletions(-)

diff --git a/agent_nodes.tf b/agent_nodes.tf
index bd6b354..c739757 100644
--- a/agent_nodes.tf
+++ b/agent_nodes.tf
@@ -61,6 +61,7 @@ locals {
     )))
     }
   }
+  kubectl_cmd = var.use_sudo ? "sudo kubectl" : "kubectl"
 }
 
 // Install k3s agent
@@ -129,6 +130,7 @@ resource "null_resource" "agents_drain" {
     agent_name      = local.agents_metadata[split(var.separator, each.key)[0]].name
     connection_json = base64encode(jsonencode(local.root_server_connection))
     drain_timeout   = var.drain_timeout
+    kubectl_cmd     = local.kubectl_cmd
   }
   // Because we use triggers as memory area, we need to ignore all changes on it.
   lifecycle { ignore_changes = [triggers] }
@@ -166,7 +168,7 @@ resource "null_resource" "agents_drain" {
   provisioner "remote-exec" {
     when = destroy
     inline = [
-      "kubectl drain ${self.triggers.agent_name} --delete-local-data --force --ignore-daemonsets --timeout=${self.triggers.drain_timeout}"
+      "${self.triggers.kubectl_cmd} drain ${self.triggers.agent_name} --delete-local-data --force --ignore-daemonsets --timeout=${self.triggers.drain_timeout}"
     ]
   }
 }
@@ -184,6 +186,7 @@ resource "null_resource" "agents_annotation" {
     // Because some fields must be used on destruction, we need to store them into the current
     // object. The only way to do that is to use triggers to store theses fields.
     connection_json = base64encode(jsonencode(local.root_server_connection))
+    kubectl_cmd     = local.kubectl_cmd
   }
   // Because we dont care about connection modification, we ignore its changes.
   lifecycle { ignore_changes = [triggers["connection_json"]] }
@@ -221,14 +224,14 @@ resource "null_resource" "agents_annotation" {
   provisioner "remote-exec" {
     inline = [
-      "until kubectl get node ${self.triggers.agent_name}; do sleep 1; done",
-      "kubectl annotate --overwrite node ${self.triggers.agent_name} ${self.triggers.annotation_name}=${self.triggers.on_value_changes}"
+      "until ${self.triggers.kubectl_cmd} get node ${self.triggers.agent_name}; do sleep 1; done",
+      "${self.triggers.kubectl_cmd} annotate --overwrite node ${self.triggers.agent_name} ${self.triggers.annotation_name}=${self.triggers.on_value_changes}"
     ]
   }
 
   provisioner "remote-exec" {
     when = destroy
     inline = [
-      "kubectl annotate node ${self.triggers.agent_name} ${self.triggers.annotation_name}-"
+      "${self.triggers.kubectl_cmd} annotate node ${self.triggers.agent_name} ${self.triggers.annotation_name}-"
     ]
   }
 }
@@ -246,6 +249,7 @@ resource "null_resource" "agents_label" {
     // Because some fields must be used on destruction, we need to store them into the current
     // object. The only way to do that is to use triggers to store theses fields.
     connection_json = base64encode(jsonencode(local.root_server_connection))
+    kubectl_cmd     = local.kubectl_cmd
   }
   // Because we dont care about connection modification, we ignore its changes.
   lifecycle { ignore_changes = [triggers["connection_json"]] }
@@ -282,15 +286,15 @@ resource "null_resource" "agents_label" {
 
   provisioner "remote-exec" {
     inline = [
-      "until kubectl get node ${self.triggers.agent_name}; do sleep 1; done",
-      "kubectl label --overwrite node ${self.triggers.agent_name} ${self.triggers.label_name}=${self.triggers.on_value_changes}"
+      "until ${self.triggers.kubectl_cmd} get node ${self.triggers.agent_name}; do sleep 1; done",
+      "${self.triggers.kubectl_cmd} label --overwrite node ${self.triggers.agent_name} ${self.triggers.label_name}=${self.triggers.on_value_changes}"
     ]
   }
 
   provisioner "remote-exec" {
     when = destroy
     inline = [
-      "kubectl label node ${self.triggers.agent_name} ${self.triggers.label_name}-"
+      "${self.triggers.kubectl_cmd} label node ${self.triggers.agent_name} ${self.triggers.label_name}-"
     ]
   }
 }
@@ -308,6 +312,7 @@ resource "null_resource" "agents_taint" {
     // Because some fields must be used on destruction, we need to store them into the current
     // object. The only way to do that is to use triggers to store theses fields.
     connection_json = base64encode(jsonencode(local.root_server_connection))
+    kubectl_cmd     = local.kubectl_cmd
   }
   // Because we dont care about connection modification, we ignore its changes.
   lifecycle { ignore_changes = [triggers["connection_json"]] }
@@ -344,15 +349,15 @@ resource "null_resource" "agents_taint" {
 
   provisioner "remote-exec" {
     inline = [
-      "until kubectl get node ${self.triggers.agent_name}; do sleep 1; done",
-      "kubectl taint node ${self.triggers.agent_name} ${self.triggers.taint_name}=${self.triggers.on_value_changes} --overwrite"
+      "until ${self.triggers.kubectl_cmd} get node ${self.triggers.agent_name}; do sleep 1; done",
+      "${self.triggers.kubectl_cmd} taint node ${self.triggers.agent_name} ${self.triggers.taint_name}=${self.triggers.on_value_changes} --overwrite"
     ]
   }
 
   provisioner "remote-exec" {
     when = destroy
     inline = [
-      "kubectl taint node ${self.triggers.agent_name} ${self.triggers.taint_name}-"
+      "${self.triggers.kubectl_cmd} taint node ${self.triggers.agent_name} ${self.triggers.taint_name}-"
     ]
   }
 }
diff --git a/server_nodes.tf b/server_nodes.tf
index 98bc473..a015185 100644
--- a/server_nodes.tf
+++ b/server_nodes.tf
@@ -206,7 +206,7 @@ resource "null_resource" "servers_install" {
   provisioner "remote-exec" {
     inline = [
       "INSTALL_K3S_VERSION=${local.k3s_version} sh /tmp/k3s-installer server ${local.servers_metadata[each.key].flags}",
-      "until kubectl get node ${local.servers_metadata[each.key].name}; do sleep 1; done"
+      "until ${local.kubectl_cmd} get node ${local.servers_metadata[each.key].name}; do sleep 1; done"
     ]
   }
 }
@@ -220,6 +220,7 @@ resource "null_resource" "servers_drain" {
     server_name     = local.servers_metadata[split(var.separator, each.key)[0]].name
     connection_json = base64encode(jsonencode(local.root_server_connection))
     drain_timeout   = var.drain_timeout
+    kubectl_cmd     = local.kubectl_cmd
   }
 
   lifecycle { ignore_changes = [triggers] }
@@ -256,7 +257,7 @@ resource "null_resource" "servers_drain" {
   provisioner "remote-exec" {
     when = destroy
     inline = [
-      "kubectl drain ${self.triggers.server_name} --delete-local-data --force --ignore-daemonsets --timeout=${self.triggers.drain_timeout}"
+      "${self.triggers.kubectl_cmd} drain ${self.triggers.server_name} --delete-local-data --force --ignore-daemonsets --timeout=${self.triggers.drain_timeout}"
     ]
   }
 }
@@ -272,6 +273,7 @@ resource "null_resource" "servers_annotation" {
     on_value_changes = each.value
 
     connection_json = base64encode(jsonencode(local.root_server_connection))
+    kubectl_cmd     = local.kubectl_cmd
   }
 
   lifecycle { ignore_changes = [triggers["connection_json"]] }
@@ -307,14 +309,14 @@ resource "null_resource" "servers_annotation" {
 
   provisioner "remote-exec" {
     inline = [
-      "kubectl annotate --overwrite node ${self.triggers.server_name} ${self.triggers.annotation_name}=${self.triggers.on_value_changes}"
+      "${self.triggers.kubectl_cmd} annotate --overwrite node ${self.triggers.server_name} ${self.triggers.annotation_name}=${self.triggers.on_value_changes}"
     ]
   }
 
   provisioner "remote-exec" {
     when = destroy
     inline = [
-      "kubectl annotate node ${self.triggers.server_name} ${self.triggers.annotation_name}-"
+      "${self.triggers.kubectl_cmd} annotate node ${self.triggers.server_name} ${self.triggers.annotation_name}-"
     ]
   }
 }
@@ -330,6 +332,7 @@ resource "null_resource" "servers_label" {
     on_value_changes = each.value
 
     connection_json = base64encode(jsonencode(local.root_server_connection))
+    kubectl_cmd     = local.kubectl_cmd
   }
 
   lifecycle { ignore_changes = [triggers["connection_json"]] }
@@ -365,14 +368,14 @@ resource "null_resource" "servers_label" {
 
   provisioner "remote-exec" {
     inline = [
-      "kubectl label --overwrite node ${self.triggers.server_name} ${self.triggers.label_name}=${self.triggers.on_value_changes}"
+      "${self.triggers.kubectl_cmd} label --overwrite node ${self.triggers.server_name} ${self.triggers.label_name}=${self.triggers.on_value_changes}"
     ]
   }
 
   provisioner "remote-exec" {
     when = destroy
     inline = [
-      "kubectl label node ${self.triggers.server_name} ${self.triggers.label_name}-"
+      "${self.triggers.kubectl_cmd} label node ${self.triggers.server_name} ${self.triggers.label_name}-"
     ]
   }
 }
@@ -389,6 +392,7 @@ resource "null_resource" "servers_taint" {
     on_value_changes = each.value
 
     connection_json = base64encode(jsonencode(local.root_server_connection))
+    kubectl_cmd     = local.kubectl_cmd
   }
 
   lifecycle { ignore_changes = [triggers["connection_json"]] }
@@ -424,14 +428,14 @@ resource "null_resource" "servers_taint" {
 
   provisioner "remote-exec" {
     inline = [
-      "kubectl taint node ${self.triggers.server_name} ${self.triggers.taint_name}=${self.triggers.on_value_changes} --overwrite"
+      "${self.triggers.kubectl_cmd} taint node ${self.triggers.server_name} ${self.triggers.taint_name}=${self.triggers.on_value_changes} --overwrite"
     ]
   }
 
   provisioner "remote-exec" {
     when = destroy
     inline = [
-      "kubectl taint node ${self.triggers.server_name} ${self.triggers.taint_name}-"
+      "${self.triggers.kubectl_cmd} taint node ${self.triggers.server_name} ${self.triggers.taint_name}-"
     ]
   }
 }
diff --git a/variables.tf b/variables.tf
index 7286603..6a19214 100644
--- a/variables.tf
+++ b/variables.tf
@@ -146,3 +146,9 @@ variable "separator" {
   description = "Separator used to separates node name and field name (used to manage annotations, labels and taints)."
   default     = "|"
 }
+
+variable "use_sudo" {
+  description = "Whether or not to use kubectl with sudo during cluster setup."
+  default     = false
+  type        = bool
+}