From 8a91e83210452e5421e81e755d8f97f363d03e16 Mon Sep 17 00:00:00 2001 From: Em Sharnoff Date: Fri, 29 Dec 2023 12:07:05 -0800 Subject: [PATCH] Enable golangci-lint on neonvm/ (#721) Lots of changes to .golangci.yml to add exemptions for exhaustruct. Also lots of smaller changes in neonvm/ to resolve the lint issues. This will probably have conflicts with other ongoing work. --- .golangci.yml | 34 ++++-- neonvm/apis/neonvm/v1/groupversion_info.go | 3 +- neonvm/apis/neonvm/v1/ippool_types.go | 2 +- neonvm/apis/neonvm/v1/virtualmachine_types.go | 4 +- .../apis/neonvm/v1/virtualmachine_webhook.go | 3 +- .../v1/virtualmachinemigration_types.go | 2 +- .../v1/virtualmachinemigration_webhook.go | 7 +- neonvm/apis/neonvm/v1/webhook_suite_test.go | 10 +- neonvm/controllers/suite_test.go | 7 +- .../controllers/virtualmachine_controller.go | 72 ++++++----- .../virtualmachine_controller_test.go | 10 +- .../controllers/virtualmachine_qmp_queries.go | 113 ++++++++++++------ .../virtualmachinemigration_controller.go | 11 +- neonvm/main.go | 21 ++-- neonvm/pkg/ipam-demo.go | 9 +- neonvm/pkg/ipam/allocate.go | 17 ++- neonvm/pkg/ipam/client.go | 3 +- neonvm/pkg/ipam/ipam.go | 83 +++++++------ neonvm/runner/main.go | 65 +++++----- neonvm/tools/vm-builder/main.go | 52 ++++---- neonvm/tools/vxlan/controller/main.go | 10 +- 21 files changed, 313 insertions(+), 225 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 922dfcab6..8650d8533 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -2,9 +2,6 @@ run: deadline: 5m issues-exit-code: 1 - # TODO: fix all the issues for neonvm and enable linter for it - skip-dirs: - - neonvm issues: exclude: @@ -53,20 +50,37 @@ linters-settings: # see: exhaustruct: exclude: + - '^crypto/tls\.Config$' - '^net/http\.(Client|Server)' - - '^net\.TCPAddr$' - # metav1.{CreateOptions,GetOptions,ListOptions,WatchOptions,PatchOptions,DeleteOptions} - - '^k8s\.io/apimachinery/pkg/apis/meta/v1\.(Create|Get|List|Watch|Patch|Delete)Options$' - - '^k8s\.io/apimachinery/pkg/apis/meta/v1\.ObjectMeta$' + - '^net\.(Dialer|TCPAddr)$' + - '^archive/tar\.Header$' + - '^k8s\.io/api/core/v1\.\w+$' - '^k8s\.io/apimachinery/pkg/api/resource\.Quantity$' - - '^github.com/prometheus/client_golang/prometheus(/.*)?\.\w+Opts$' + # metav1.{CreateOptions,GetOptions,ListOptions,WatchOptions,PatchOptions,UpdateOptions,DeleteOptions} + - '^k8s\.io/apimachinery/pkg/apis/meta/v1\.(Create|Get|List|Watch|Patch|Update|Delete)Options$' + - '^k8s\.io/apimachinery/pkg/apis/meta/v1\.(Condition|LabelSelector|ObjectMeta)$' + - '^k8s\.io/client-go/tools/leaderelection/resourcelock\.ResourceLockConfig$' + - '^k8s\.io/client-go/tools/leaderelection\.(LeaderCallbacks|LeaderElectionConfig)$' + - '^sigs\.k8s\.io/controller-runtime/pkg/client\.Options$' + - '^sigs\.k8s\.io/controller-runtime/pkg/controller\.Options$' + - '^sigs\.k8s\.io/controller-runtime/pkg/envtest\.(Environment|WebhookInstallOptions)$' + - '^sigs\.k8s\.io/controller-runtime/pkg/manager\.Options$' + - '^sigs\.k8s\.io/controller-runtime/pkg/reconcile\.Result$' + - '^sigs\.k8s\.io/controller-runtime/pkg/scheme\.Builder$' - '^github\.com/containerd/cgroups/v3/cgroup2\.(Resources|Memory)' + - '^github\.com/containerd/cgroups/v3/cgroup2\.CPU$' + - '^github\.com/docker/docker/api/types/container\.Config$' + - '^github\.com/docker/docker/api/types\.\w+Options$' + - '^github\.com/opencontainers/runtime-spec/specs-go\.\w+$' # Exempt the entire package. Too many big structs. 
+ - '^github\.com/prometheus/client_golang/prometheus(/.*)?\.\w+Opts$' - '^github\.com/tychoish/fun/pubsub\.BrokerOptions$' - - '^github\.com/neondatabase/autoscaling/pkg/util/patch\.Operation$' - - '^github\.com/neondatabase/autoscaling/pkg/util/watch\.HandlerFuncs$' + - '^github\.com/vishvananda/netlink\.\w+$' # Exempt the entire package. Too many big structs. # vmapi.{VirtualMachine,VirtualMachineSpec,VirtualMachineMigration,VirtualMachineMigrationSpec} - '^github\.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1\.VirtualMachine(Migration)?(Spec)?$' + - '^github\.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1\.IPPool$' - '^github\.com/neondatabase/autoscaling/pkg/agent/core\.ActionSet$' + - '^github\.com/neondatabase/autoscaling/pkg/util/patch\.Operation$' + - '^github\.com/neondatabase/autoscaling/pkg/util/watch\.HandlerFuncs$' # see: gci: diff --git a/neonvm/apis/neonvm/v1/groupversion_info.go b/neonvm/apis/neonvm/v1/groupversion_info.go index bbbf8d381..0e325375a 100644 --- a/neonvm/apis/neonvm/v1/groupversion_info.go +++ b/neonvm/apis/neonvm/v1/groupversion_info.go @@ -20,8 +20,9 @@ limitations under the License. package v1 import ( - "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/scheme" + + "k8s.io/apimachinery/pkg/runtime/schema" ) var ( diff --git a/neonvm/apis/neonvm/v1/ippool_types.go b/neonvm/apis/neonvm/v1/ippool_types.go index 936d93a47..f0b2e4cf4 100644 --- a/neonvm/apis/neonvm/v1/ippool_types.go +++ b/neonvm/apis/neonvm/v1/ippool_types.go @@ -42,5 +42,5 @@ type IPPoolList struct { } func init() { - SchemeBuilder.Register(&IPPool{}, &IPPoolList{}) + SchemeBuilder.Register(&IPPool{}, &IPPoolList{}) //nolint:exhaustruct // just being used to provide the types } diff --git a/neonvm/apis/neonvm/v1/virtualmachine_types.go b/neonvm/apis/neonvm/v1/virtualmachine_types.go index 696db980e..bf138b5d8 100644 --- a/neonvm/apis/neonvm/v1/virtualmachine_types.go +++ b/neonvm/apis/neonvm/v1/virtualmachine_types.go @@ -307,7 +307,7 @@ const ( ) type Disk struct { - //Disk's name. + // Disk's name. // Must be a DNS_LABEL and unique within the virtual machine. Name string `json:"name"` // Mounted read-only if true, read-write otherwise (false or unspecified). @@ -466,5 +466,5 @@ type VirtualMachineList struct { } func init() { - SchemeBuilder.Register(&VirtualMachine{}, &VirtualMachineList{}) + SchemeBuilder.Register(&VirtualMachine{}, &VirtualMachineList{}) //nolint:exhaustruct // just being used to provide the types } diff --git a/neonvm/apis/neonvm/v1/virtualmachine_webhook.go b/neonvm/apis/neonvm/v1/virtualmachine_webhook.go index 3b94ae363..6fc85ffac 100644 --- a/neonvm/apis/neonvm/v1/virtualmachine_webhook.go +++ b/neonvm/apis/neonvm/v1/virtualmachine_webhook.go @@ -21,10 +21,11 @@ import ( "fmt" "reflect" - "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + + "k8s.io/apimachinery/pkg/runtime" ) // log is for logging in this package. 
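The `.golangci.yml` exemptions above and the `//nolint:exhaustruct` directives added throughout this patch are the two ways the exhaustruct findings are handled: type- or package-wide regex excludes for structs that are routinely partially initialized, and per-site directives where a single literal is fine as-is. Below is an illustrative sketch only (not part of the patch); the `demoConfig` and `demoOptions` types are hypothetical stand-ins for the kinds of structs involved.

```go
// Sketch of how exhaustruct findings are satisfied or suppressed.
// demoConfig and demoOptions are hypothetical types for illustration.
package demo

type demoConfig struct {
	Name    string
	Retries int
	Debug   bool
}

type demoOptions struct {
	Timeout int
	Verbose bool
}

func build() (demoConfig, demoOptions) {
	// Without an exemption, exhaustruct reports this literal if any field
	// is omitted, so every field is listed explicitly.
	cfg := demoConfig{
		Name:    "example",
		Retries: 3,
		Debug:   false,
	}

	// Alternatively, a per-site directive suppresses the check, as done for
	// SchemeBuilder.Register(...) and zap.Options in this patch; broader
	// suppression goes through the exclude regexes in .golangci.yml.
	opts := demoOptions{Timeout: 30} //nolint:exhaustruct // remaining fields intentionally zero
	return cfg, opts
}
```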
diff --git a/neonvm/apis/neonvm/v1/virtualmachinemigration_types.go b/neonvm/apis/neonvm/v1/virtualmachinemigration_types.go index f47227a86..3de246363 100644 --- a/neonvm/apis/neonvm/v1/virtualmachinemigration_types.go +++ b/neonvm/apis/neonvm/v1/virtualmachinemigration_types.go @@ -177,5 +177,5 @@ type VirtualMachineMigrationList struct { } func init() { - SchemeBuilder.Register(&VirtualMachineMigration{}, &VirtualMachineMigrationList{}) + SchemeBuilder.Register(&VirtualMachineMigration{}, &VirtualMachineMigrationList{}) //nolint:exhaustruct // just being used to provide the types } diff --git a/neonvm/apis/neonvm/v1/virtualmachinemigration_webhook.go b/neonvm/apis/neonvm/v1/virtualmachinemigration_webhook.go index b438e2f37..034c702b8 100644 --- a/neonvm/apis/neonvm/v1/virtualmachinemigration_webhook.go +++ b/neonvm/apis/neonvm/v1/virtualmachinemigration_webhook.go @@ -17,14 +17,11 @@ limitations under the License. package v1 import ( - "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" - logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" -) -// log is for logging in this package. -var virtualmachinemigrationlog = logf.Log.WithName("virtualmachinemigration-resource") + "k8s.io/apimachinery/pkg/runtime" +) func (r *VirtualMachineMigration) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). diff --git a/neonvm/apis/neonvm/v1/webhook_suite_test.go b/neonvm/apis/neonvm/v1/webhook_suite_test.go index 557b35fcd..92abfa9b1 100644 --- a/neonvm/apis/neonvm/v1/webhook_suite_test.go +++ b/neonvm/apis/neonvm/v1/webhook_suite_test.go @@ -27,16 +27,16 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - - admissionv1beta1 "k8s.io/api/admission/v1beta1" - //+kubebuilder:scaffold:imports - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + //+kubebuilder:scaffold:imports + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to diff --git a/neonvm/controllers/suite_test.go b/neonvm/controllers/suite_test.go index 81878eb17..fc90caa52 100644 --- a/neonvm/controllers/suite_test.go +++ b/neonvm/controllers/suite_test.go @@ -22,16 +22,15 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1" - //+kubebuilder:scaffold:imports ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to diff --git a/neonvm/controllers/virtualmachine_controller.go b/neonvm/controllers/virtualmachine_controller.go index 0f3d12314..bd5d552cd 100644 --- a/neonvm/controllers/virtualmachine_controller.go +++ b/neonvm/controllers/virtualmachine_controller.go @@ -29,6 +29,13 @@ import ( "strconv" "time" + nadapiv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -38,17 +45,10 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apiserver/pkg/storage/names" "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" - nadapiv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1" "github.com/neondatabase/autoscaling/neonvm/controllers/buildtag" "github.com/neondatabase/autoscaling/neonvm/pkg/ipam" - "github.com/neondatabase/autoscaling/pkg/api" "github.com/neondatabase/autoscaling/pkg/util/patch" ) @@ -134,11 +134,7 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque if controllerutil.ContainsFinalizer(&virtualmachine, virtualmachineFinalizer) { // our finalizer is present, so lets handle any external dependency log.Info("Performing Finalizer Operations for VirtualMachine before delete it") - if err := r.doFinalizerOperationsForVirtualMachine(ctx, &virtualmachine); err != nil { - // if fail to delete the external dependency here, return with error - // so that it can be retried - return ctrl.Result{}, err - } + r.doFinalizerOperationsForVirtualMachine(ctx, &virtualmachine) // remove our finalizer from the list and update it. log.Info("Removing Finalizer for VirtualMachine after successfully perform the operations") @@ -193,8 +189,8 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{RequeueAfter: time.Second}, nil } -// finalizeVirtualMachine will perform the required operations before delete the CR. -func (r *VirtualMachineReconciler) doFinalizerOperationsForVirtualMachine(ctx context.Context, virtualmachine *vmv1.VirtualMachine) error { +// doFinalizerOperationsForVirtualMachine will perform the required operations before delete the CR. +func (r *VirtualMachineReconciler) doFinalizerOperationsForVirtualMachine(ctx context.Context, virtualmachine *vmv1.VirtualMachine) { // TODO(user): Add the cleanup steps that the operator // needs to do before the CR can be deleted. 
Examples // of finalizers include performing backups and deleting @@ -221,33 +217,31 @@ func (r *VirtualMachineReconciler) doFinalizerOperationsForVirtualMachine(ctx co if err != nil { // ignore error log.Error(err, "ignored error") - return nil + return } nadNamespace, err := nadIpamNamespace() if err != nil { // ignore error log.Error(err, "ignored error") - return nil + return } ipam, err := ipam.New(ctx, nadName, nadNamespace) if err != nil { // ignore error log.Error(err, "ignored error") - return nil + return } defer ipam.Close() ip, err := ipam.ReleaseIP(ctx, virtualmachine.Name, virtualmachine.Namespace) if err != nil { // ignore error log.Error(err, "fail to release IP, error ignored") - return nil + return } message := fmt.Sprintf("Released IP %s", ip.String()) log.Info(message) r.Recorder.Event(virtualmachine, "Normal", "OverlayNet", message) } - - return nil } func runnerSupportsCgroup(pod *corev1.Pod) bool { @@ -268,7 +262,8 @@ func (r *VirtualMachineReconciler) updateVMStatusCPU( ctx context.Context, virtualmachine *vmv1.VirtualMachine, vmRunner *corev1.Pod, - qmpPluggedCPUs uint32, supportsCgroup bool, cgroupUsage api.VCPUCgroup, + qmpPluggedCPUs uint32, + cgroupUsage *api.VCPUCgroup, ) { log := log.FromContext(ctx) @@ -277,7 +272,7 @@ func (r *VirtualMachineReconciler) updateVMStatusCPU( // - vm.Status.CPUs.RoundUp() == qmpPluggedCPUs // Otherwise, we update the status. var currentCPUUsage vmv1.MilliCPU - if supportsCgroup { + if cgroupUsage != nil { if cgroupUsage.VCPUs.RoundedUp() != qmpPluggedCPUs { // This is not expected but it's fine. We only report the // mismatch here and will resolve it in the next reconcile @@ -469,7 +464,7 @@ func (r *VirtualMachineReconciler) doReconcile(ctx context.Context, virtualmachi pluggedCPU := uint32(len(cpuSlotsPlugged)) // get cgroups CPU details from runner pod - var cgroupUsage api.VCPUCgroup + var cgroupUsage *api.VCPUCgroup supportsCgroup := runnerSupportsCgroup(vmRunner) if supportsCgroup { cgroupUsage, err = getRunnerCgroup(ctx, virtualmachine) @@ -480,7 +475,7 @@ func (r *VirtualMachineReconciler) doReconcile(ctx context.Context, virtualmachi } // update status by CPUs used in the VM - r.updateVMStatusCPU(ctx, virtualmachine, vmRunner, pluggedCPU, supportsCgroup, cgroupUsage) + r.updateVMStatusCPU(ctx, virtualmachine, vmRunner, pluggedCPU, cgroupUsage) // get Memory details from hypervisor and update VM status memorySize, err := QmpGetMemorySize(QmpAddr(virtualmachine)) @@ -613,7 +608,7 @@ func (r *VirtualMachineReconciler) doReconcile(ctx context.Context, virtualmachi specCPU := virtualmachine.Spec.Guest.CPUs.Use pluggedCPU := uint32(len(cpuSlotsPlugged)) - var cgroupUsage api.VCPUCgroup + var cgroupUsage *api.VCPUCgroup supportsCgroup := runnerSupportsCgroup(vmRunner) if supportsCgroup { cgroupUsage, err = getRunnerCgroup(ctx, virtualmachine) @@ -715,7 +710,7 @@ func (r *VirtualMachineReconciler) doReconcile(ctx context.Context, virtualmachi // set VM phase to running if everything scaled if cpuScaled && ramScaled { // update status by CPUs used in the VM - r.updateVMStatusCPU(ctx, virtualmachine, vmRunner, pluggedCPU, supportsCgroup, cgroupUsage) + r.updateVMStatusCPU(ctx, virtualmachine, vmRunner, pluggedCPU, cgroupUsage) // get Memory details from hypervisor and update VM status memorySize, err := QmpGetMemorySize(QmpAddr(virtualmachine)) @@ -738,7 +733,7 @@ func (r *VirtualMachineReconciler) doReconcile(ctx context.Context, virtualmachi err := r.Get(ctx, types.NamespacedName{Name: virtualmachine.Status.PodName, Namespace: 
virtualmachine.Namespace}, vmRunner) if err == nil { // delete current runner - if err = r.deleteRunnerPodIfEnabled(ctx, virtualmachine, vmRunner); err != nil { + if err := r.deleteRunnerPodIfEnabled(ctx, virtualmachine, vmRunner); err != nil { return err } } else if !apierrors.IsNotFound(err) { @@ -760,7 +755,7 @@ func (r *VirtualMachineReconciler) doReconcile(ctx context.Context, virtualmachi // delete runner only when VM failed if found && virtualmachine.Status.Phase == vmv1.VmFailed { // delete current runner - if err = r.deleteRunnerPodIfEnabled(ctx, virtualmachine, vmRunner); err != nil { + if err := r.deleteRunnerPodIfEnabled(ctx, virtualmachine, vmRunner); err != nil { return err } } @@ -1048,6 +1043,7 @@ func setRunnerCgroup(ctx context.Context, vm *vmv1.VirtualMachine, cpu vmv1.Mill if err != nil { return err } + defer resp.Body.Close() if resp.StatusCode != 200 { return fmt.Errorf("unexpected status %s", resp.Status) @@ -1055,39 +1051,39 @@ func setRunnerCgroup(ctx context.Context, vm *vmv1.VirtualMachine, cpu vmv1.Mill return nil } -func getRunnerCgroup(ctx context.Context, vm *vmv1.VirtualMachine) (api.VCPUCgroup, error) { +func getRunnerCgroup(ctx context.Context, vm *vmv1.VirtualMachine) (*api.VCPUCgroup, error) { ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() - result := api.VCPUCgroup{} url := fmt.Sprintf("http://%s:%d/cpu_current", vm.Status.PodIP, vm.Spec.RunnerPort) req, err := http.NewRequestWithContext(ctx, "GET", url, nil) if err != nil { - return result, err + return nil, err } resp, err := http.DefaultClient.Do(req) if err != nil { - return result, err + return nil, err } if resp.StatusCode != 200 { - return result, fmt.Errorf("unexpected status %s", resp.Status) + return nil, fmt.Errorf("unexpected status %s", resp.Status) } body, err := io.ReadAll(resp.Body) defer resp.Body.Close() if err != nil { - return result, err + return nil, err } + var result api.VCPUCgroup err = json.Unmarshal(body, &result) if err != nil { - return result, err + return nil, err } - return result, nil + return &result, nil } // imageForVirtualMachine gets the Operand image which is managed by this controller @@ -1114,12 +1110,12 @@ func podSpec(virtualmachine *vmv1.VirtualMachine) (*corev1.Pod, error) { vmSpecJson, err := json.Marshal(virtualmachine.Spec) if err != nil { - return nil, fmt.Errorf("marshal VM Spec: %s", err) + return nil, fmt.Errorf("marshal VM Spec: %w", err) } vmStatusJson, err := json.Marshal(virtualmachine.Status) if err != nil { - return nil, fmt.Errorf("marshal VM Status: %s", err) + return nil, fmt.Errorf("marshal VM Status: %w", err) } pod := &corev1.Pod{ diff --git a/neonvm/controllers/virtualmachine_controller_test.go b/neonvm/controllers/virtualmachine_controller_test.go index 1fdda7519..21b253719 100644 --- a/neonvm/controllers/virtualmachine_controller_test.go +++ b/neonvm/controllers/virtualmachine_controller_test.go @@ -18,18 +18,17 @@ package controllers import ( "context" - // "fmt" - // "errors" "os" "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/reconcile" vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1" ) @@ -97,8 +96,9 @@ var _ = Describe("VirtualMachine controller", func() { By("Reconciling the custom resource created") virtualmachineReconciler := &VirtualMachineReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), + Client: k8sClient, + Scheme: k8sClient.Scheme(), + Recorder: nil, } _, err = virtualmachineReconciler.Reconcile(ctx, reconcile.Request{ diff --git a/neonvm/controllers/virtualmachine_qmp_queries.go b/neonvm/controllers/virtualmachine_qmp_queries.go index aa1ba20c2..f584095d6 100644 --- a/neonvm/controllers/virtualmachine_qmp_queries.go +++ b/neonvm/controllers/virtualmachine_qmp_queries.go @@ -8,6 +8,7 @@ import ( "time" "github.com/digitalocean/go-qemu/qmp" + "k8s.io/apimachinery/pkg/api/resource" vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1" @@ -102,7 +103,7 @@ func QmpGetCpus(ip string, port int32) ([]QmpCpuSlot, []QmpCpuSlot, error) { if err != nil { return nil, nil, err } - defer mon.Disconnect() + defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? qmpcmd := []byte(`{"execute": "query-hotpluggable-cpus"}`) raw, err := mon.Run(qmpcmd) @@ -111,7 +112,9 @@ func QmpGetCpus(ip string, port int32) ([]QmpCpuSlot, []QmpCpuSlot, error) { } var result QmpCpus - json.Unmarshal(raw, &result) + if err := json.Unmarshal(raw, &result); err != nil { + return nil, nil, fmt.Errorf("error unmarshaling json: %w", err) + } plugged := []QmpCpuSlot{} empty := []QmpCpuSlot{} @@ -139,11 +142,20 @@ func QmpPlugCpu(ip string, port int32) error { if err != nil { return err } - defer mon.Disconnect() + defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? // empty list reversed, first cpu slot in the end of list and last cpu slot in the beginning slot := empty[len(empty)-1] - qmpcmd := []byte(fmt.Sprintf(`{"execute": "device_add", "arguments": {"id": "cpu%d", "driver": "%s", "core-id": %d, "socket-id": 0, "thread-id": 0}}`, slot.Core, slot.Type, slot.Core)) + qmpcmd := []byte(fmt.Sprintf(`{ + "execute": "device_add", + "arguments": { + "id": "cpu%d", + "driver": %q, + "core-id": %d, + "socket-id": 0, + "thread-id": 0 + } + }`, slot.Core, slot.Type, slot.Core)) _, err = mon.Run(qmpcmd) if err != nil { @@ -176,9 +188,9 @@ func QmpUnplugCpu(ip string, port int32) error { if err != nil { return err } - defer mon.Disconnect() + defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? - cmd := []byte(fmt.Sprintf(`{"execute": "device_del", "arguments": {"id": "%s"}}`, plugged[slot].QOM)) + cmd := []byte(fmt.Sprintf(`{"execute": "device_del", "arguments": {"id": %q}}`, plugged[slot].QOM)) _, err = mon.Run(cmd) if err != nil { return err @@ -207,7 +219,7 @@ func QmpSyncCpuToTarget(vm *vmv1.VirtualMachine, migration *vmv1.VirtualMachineM if err != nil { return err } - defer target.Disconnect() + defer target.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? 
searchForEmpty: for _, slot := range plugged { @@ -219,7 +231,16 @@ searchForEmpty: continue searchForEmpty } } - qmpcmd := []byte(fmt.Sprintf(`{"execute": "device_add", "arguments": {"id": "cpu%d", "driver": "%s", "core-id": %d, "socket-id": 0, "thread-id": 0}}`, slot.Core, slot.Type, slot.Core)) + qmpcmd := []byte(fmt.Sprintf(`{ + "execute": "device_add", + "arguments": { + "id": "cpu%d", + "driver": %q, + "core-id": %d, + "socket-id": 0, + "thread-id": 0 + } + }`, slot.Core, slot.Type, slot.Core)) _, err = target.Run(qmpcmd) if err != nil { return err @@ -234,7 +255,7 @@ func QmpQueryMemoryDevices(ip string, port int32) ([]QmpMemoryDevice, error) { if err != nil { return nil, err } - defer mon.Disconnect() + defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? var result QmpMemoryDevices cmd := []byte(`{"execute": "query-memory-devices"}`) @@ -242,7 +263,9 @@ func QmpQueryMemoryDevices(ip string, port int32) ([]QmpMemoryDevice, error) { if err != nil { return nil, err } - json.Unmarshal(raw, &result) + if err := json.Unmarshal(raw, &result); err != nil { + return nil, fmt.Errorf("error unmarshaling json: %w", err) + } return result.Return, nil } @@ -265,7 +288,7 @@ func QmpPlugMemory(virtualmachine *vmv1.VirtualMachine) error { if err != nil { return err } - defer mon.Disconnect() + defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? // try to find empty slot var slot int32 @@ -290,7 +313,7 @@ func QmpPlugMemory(virtualmachine *vmv1.VirtualMachine) error { if err != nil { // device_add command failed... so try remove object that we just created cmd = []byte(fmt.Sprintf(`{"execute": "object-del", "arguments": {"id": "memslot%d"}}`, slot)) - mon.Run(cmd) + mon.Run(cmd) //nolint:errcheck // already have one error, ignoring the second. return err } @@ -311,7 +334,7 @@ func QmpSyncMemoryToTarget(vm *vmv1.VirtualMachine, migration *vmv1.VirtualMachi if err != nil { return err } - defer target.Disconnect() + defer target.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? for _, m := range memoryDevices { // firsly check if slot occupied already @@ -328,18 +351,32 @@ func QmpSyncMemoryToTarget(vm *vmv1.VirtualMachine, migration *vmv1.VirtualMachi } // add memdev object memdevId := strings.ReplaceAll(m.Data.Memdev, "/objects/", "") - cmd := []byte(fmt.Sprintf(`{"execute": "object-add", "arguments": {"id": "%s", "size": %d, "qom-type": "memory-backend-ram"}}`, memdevId, m.Data.Size)) + cmd := []byte(fmt.Sprintf(`{ + "execute": "object-add", + "arguments": { + "id": %q, + "size": %d, + "qom-type": "memory-backend-ram" + } + }`, memdevId, m.Data.Size)) _, err = target.Run(cmd) if err != nil { return err } // now add pc-dimm device - cmd = []byte(fmt.Sprintf(`{"execute": "device_add", "arguments": {"id": "%s", "driver": "pc-dimm", "memdev": "%s"}}`, m.Data.Id, memdevId)) + cmd = []byte(fmt.Sprintf(`{ + "execute": "device_add", + "arguments": { + "id": %q, + "driver": "pc-dimm", + "memdev": "%s" + } + }`, m.Data.Id, memdevId)) _, err = target.Run(cmd) if err != nil { // device_add command failed... so try remove object that we just created - cmd = []byte(fmt.Sprintf(`{"execute": "object-del", "arguments": {"id": "%s"}}`, m.Data.Memdev)) - target.Run(cmd) + cmd = []byte(fmt.Sprintf(`{"execute": "object-del", "arguments": {"id": %q}}`, m.Data.Memdev)) + target.Run(cmd) //nolint:errcheck // already have one error, ignoring the second. 
return err } } @@ -358,7 +395,7 @@ func QmpPlugMemoryToRunner(ip string, port int32, size int64) error { if err != nil { return err } - defer mon.Disconnect() + defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? // add memdev object for next slot // firstly check if such object already present to avoid repeats @@ -378,7 +415,7 @@ func QmpPlugMemoryToRunner(ip string, port int32, size int64) error { if err != nil { // device_add command failed... so try remove object that we just created cmd = []byte(fmt.Sprintf(`{"execute": "object-del", "arguments": {"id": "memslot%d"}}`, plugged+1)) - mon.Run(cmd) + mon.Run(cmd) //nolint:errcheck // already have one error, ignoring the second. return err } @@ -399,14 +436,14 @@ func QmpUnplugMemory(ip string, port int32) error { if err != nil { return err } - defer mon.Disconnect() + defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? // run from last to first var i int var merr error for i = plugged - 1; i >= 0; i-- { // remove pc-dimm device - cmd := []byte(fmt.Sprintf(`{"execute": "device_del", "arguments": {"id": "%s"}}`, memoryDevices[i].Data.Id)) + cmd := []byte(fmt.Sprintf(`{"execute": "device_del", "arguments": {"id": %q}}`, memoryDevices[i].Data.Id)) _, err = mon.Run(cmd) if err != nil { merr = errors.Join(merr, err) @@ -416,7 +453,10 @@ func QmpUnplugMemory(ip string, port int32) error { time.Sleep(time.Second) // remove corresponding memdev object - cmd = []byte(fmt.Sprintf(`{"execute": "object-del", "arguments": {"id": "%s"}}`, strings.ReplaceAll(memoryDevices[i].Data.Memdev, "/objects/", ""))) + cmd = []byte(fmt.Sprintf(`{ + "execute": "object-del", + "arguments": {"id": %q} + }`, strings.ReplaceAll(memoryDevices[i].Data.Memdev, "/objects/", ""))) _, err = mon.Run(cmd) if err != nil { merr = errors.Join(merr, err) @@ -438,7 +478,7 @@ func QmpGetMemorySize(ip string, port int32) (*resource.Quantity, error) { if err != nil { return nil, err } - defer mon.Disconnect() + defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? qmpcmd := []byte(`{"execute": "query-memory-size-summary"}`) raw, err := mon.Run(qmpcmd) @@ -447,7 +487,9 @@ func QmpGetMemorySize(ip string, port int32) (*resource.Quantity, error) { } var result QmpMemorySize - json.Unmarshal(raw, &result) + if err := json.Unmarshal(raw, &result); err != nil { + return nil, fmt.Errorf("error unmarshaling json: %w", err) + } return resource.NewQuantity(result.Return.BaseMemory+result.Return.PluggedMemory, resource.BinarySI), nil } @@ -466,7 +508,7 @@ func QmpStartMigration(virtualmachine *vmv1.VirtualMachine, virtualmachinemigrat if err := smon.Connect(); err != nil { return err } - defer smon.Disconnect() + defer smon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? // connect to target runner QMP t_ip := virtualmachinemigration.Status.TargetPodIP @@ -477,7 +519,7 @@ func QmpStartMigration(virtualmachine *vmv1.VirtualMachine, virtualmachinemigrat if err := tmon.Connect(); err != nil { return err } - defer tmon.Disconnect() + defer tmon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? 
cache := resource.MustParse("256Mi") var qmpcmd []byte @@ -570,24 +612,25 @@ func QmpStartMigration(virtualmachine *vmv1.VirtualMachine, virtualmachinemigrat return nil } -func QmpGetMigrationInfo(ip string, port int32) (MigrationInfo, error) { - empty := MigrationInfo{} +func QmpGetMigrationInfo(ip string, port int32) (*MigrationInfo, error) { mon, err := QmpConnect(ip, port) if err != nil { - return empty, err + return nil, err } - defer mon.Disconnect() + defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? qmpcmd := []byte(`{"execute": "query-migrate"}`) raw, err := mon.Run(qmpcmd) if err != nil { - return empty, err + return nil, err } var result QmpMigrationInfo - json.Unmarshal(raw, &result) + if err := json.Unmarshal(raw, &result); err != nil { + return nil, fmt.Errorf("error unmarshaling json: %w", err) + } - return result.Return, nil + return &result.Return, nil } func QmpCancelMigration(ip string, port int32) error { @@ -595,7 +638,7 @@ func QmpCancelMigration(ip string, port int32) error { if err != nil { return err } - defer mon.Disconnect() + defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? qmpcmd := []byte(`{"execute": "migrate_cancel"}`) _, err = mon.Run(qmpcmd) @@ -611,7 +654,7 @@ func QmpQuit(ip string, port int32) error { if err != nil { return err } - defer mon.Disconnect() + defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? qmpcmd := []byte(`{"execute": "quit"}`) _, err = mon.Run(qmpcmd) diff --git a/neonvm/controllers/virtualmachinemigration_controller.go b/neonvm/controllers/virtualmachinemigration_controller.go index c63c50b09..be4e2f4c5 100644 --- a/neonvm/controllers/virtualmachinemigration_controller.go +++ b/neonvm/controllers/virtualmachinemigration_controller.go @@ -23,6 +23,12 @@ import ( "math" "time" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -31,11 +37,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apiserver/pkg/storage/names" "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1" "github.com/neondatabase/autoscaling/neonvm/controllers/buildtag" diff --git a/neonvm/main.go b/neonvm/main.go index ed29a1d86..e8d875f38 100644 --- a/neonvm/main.go +++ b/neonvm/main.go @@ -25,25 +25,24 @@ import ( "syscall" "time" - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) - // to ensure that exec-entrypoint and run can make use of them. 
- _ "k8s.io/client-go/plugin/pkg/client/auth" - + "github.com/tychoish/fun/srv" "go.uber.org/zap/zapcore" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. + _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/klog/v2" + vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1" "github.com/neondatabase/autoscaling/neonvm/controllers" "github.com/neondatabase/autoscaling/pkg/util" - "github.com/tychoish/fun/srv" - //+kubebuilder:scaffold:imports ) var ( @@ -94,7 +93,7 @@ func main() { flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") - opts := zap.Options{ + opts := zap.Options{ //nolint:exhaustruct // typical options struct; not all fields needed. Development: true, StacktraceLevel: zapcore.Level(zapcore.PanicLevel), TimeEncoder: zapcore.ISO8601TimeEncoder, diff --git a/neonvm/pkg/ipam-demo.go b/neonvm/pkg/ipam-demo.go index 9848de896..a486768e2 100644 --- a/neonvm/pkg/ipam-demo.go +++ b/neonvm/pkg/ipam-demo.go @@ -9,10 +9,11 @@ import ( "time" "go.uber.org/zap/zapcore" - "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + "k8s.io/klog/v2" + "github.com/neondatabase/autoscaling/neonvm/pkg/ipam" ) @@ -27,7 +28,7 @@ var ( func main() { - opts := zap.Options{ + opts := zap.Options{ //nolint:exhaustruct // typical options struct; not all fields expected to be filled. 
Development: true, StacktraceLevel: zapcore.Level(zapcore.PanicLevel), TimeEncoder: zapcore.ISO8601TimeEncoder, @@ -65,7 +66,7 @@ func main() { if ip, err := ipam.AcquireIP(ctx, id, demoNamespace); err != nil { logger.Error(err, "lease failed", "id", id) } else { - logger.Info("acquired", "id", id, "ip", ip.String(), "acquired in", time.Now().Sub(startTime)) + logger.Info("acquired", "id", id, "ip", ip.String(), "acquired in", time.Since(startTime)) } }(i) time.Sleep(time.Millisecond * 200) @@ -83,7 +84,7 @@ func main() { if ip, err := ipam.ReleaseIP(ctx, id, demoNamespace); err != nil { logger.Error(err, "release failed", "id", id) } else { - logger.Info("released", "id", id, "ip", ip.String(), "released in", time.Now().Sub(startTime)) + logger.Info("released", "id", id, "ip", ip.String(), "released in", time.Since(startTime)) } }(i) time.Sleep(time.Millisecond * 200) diff --git a/neonvm/pkg/ipam/allocate.go b/neonvm/pkg/ipam/allocate.go index 4ec7169de..4d1d39882 100644 --- a/neonvm/pkg/ipam/allocate.go +++ b/neonvm/pkg/ipam/allocate.go @@ -10,10 +10,13 @@ import ( whereaboutstypes "github.com/k8snetworkplumbingwg/whereabouts/pkg/types" ) -func doAcquire(ctx context.Context, +func doAcquire( + _ context.Context, ipRange RangeConfiguration, reservation []whereaboutstypes.IPReservation, - vmName string, vmNamespace string) (net.IPNet, []whereaboutstypes.IPReservation, error) { + vmName string, + vmNamespace string, +) (net.IPNet, []whereaboutstypes.IPReservation, error) { // reduce whereabouts logging whereaboutslogging.SetLogLevel("error") @@ -38,9 +41,13 @@ func doAcquire(ctx context.Context, return net.IPNet{IP: ip, Mask: ipnet.Mask}, newReservation, nil } -func doRelease(ctx context.Context, - ipRange RangeConfiguration, reservation []whereaboutstypes.IPReservation, - vmName string, vmNamespace string) (net.IPNet, []whereaboutstypes.IPReservation, error) { +func doRelease( + _ context.Context, + ipRange RangeConfiguration, + reservation []whereaboutstypes.IPReservation, + vmName string, + vmNamespace string, +) (net.IPNet, []whereaboutstypes.IPReservation, error) { // reduce whereabouts logging whereaboutslogging.SetLogLevel("error") diff --git a/neonvm/pkg/ipam/client.go b/neonvm/pkg/ipam/client.go index cf4703ec2..1fe76071f 100644 --- a/neonvm/pkg/ipam/client.go +++ b/neonvm/pkg/ipam/client.go @@ -1,10 +1,11 @@ package ipam import ( + nad "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - nad "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned" neonvm "github.com/neondatabase/autoscaling/neonvm/client/clientset/versioned" ) diff --git a/neonvm/pkg/ipam/ipam.go b/neonvm/pkg/ipam/ipam.go index f8fdc974b..daa5e5a1c 100644 --- a/neonvm/pkg/ipam/ipam.go +++ b/neonvm/pkg/ipam/ipam.go @@ -3,6 +3,7 @@ package ipam import ( "context" "encoding/json" + "errors" "fmt" "net" "strconv" @@ -10,18 +11,18 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/api/errors" + whereaboutsallocate "github.com/k8snetworkplumbingwg/whereabouts/pkg/allocate" + whereaboutstypes "github.com/k8snetworkplumbingwg/whereabouts/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/log" + + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" - 
"sigs.k8s.io/controller-runtime/pkg/client/config" - "sigs.k8s.io/controller-runtime/pkg/log" neonvmapiv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1" neonvm "github.com/neondatabase/autoscaling/neonvm/client/clientset/versioned" - - whereaboutsallocate "github.com/k8snetworkplumbingwg/whereabouts/pkg/allocate" - whereaboutstypes "github.com/k8snetworkplumbingwg/whereabouts/pkg/types" ) const ( @@ -71,7 +72,7 @@ func New(ctx context.Context, nadName string, nadNamespace string) (*IPAM, error // get Kubernetes client config cfg, err := config.GetConfig() if err != nil { - return nil, fmt.Errorf("error building kubernetes configuration: %v", err) + return nil, fmt.Errorf("error building kubernetes configuration: %w", err) } // tune Kubernetes client performance @@ -80,7 +81,7 @@ func New(ctx context.Context, nadName string, nadNamespace string) (*IPAM, error kClient, err := NewKubeClient(cfg) if err != nil { - return nil, fmt.Errorf("error creating kubernetes client: %v", err) + return nil, fmt.Errorf("error creating kubernetes client: %w", err) } // read network-attachment-definition from Kubernetes @@ -94,7 +95,7 @@ func New(ctx context.Context, nadName string, nadNamespace string) (*IPAM, error ipamConfig, err := LoadFromNad(nad.Spec.Config, nadNamespace) if err != nil { - return nil, fmt.Errorf("network-attachment-definition IPAM config parse error: %v", err) + return nil, fmt.Errorf("network-attachment-definition IPAM config parse error: %w", err) } return &IPAM{ @@ -107,7 +108,7 @@ func New(ctx context.Context, nadName string, nadNamespace string) (*IPAM, error func LoadFromNad(nadConfig string, nadNamespace string) (*IPAMConfig, error) { var n Nad if err := json.Unmarshal([]byte(nadConfig), &n); err != nil { - return nil, fmt.Errorf("json parsing error: %v", err) + return nil, fmt.Errorf("json parsing error: %w", err) } if n.IPAM == nil { @@ -129,7 +130,7 @@ func LoadFromNad(nadConfig string, nadNamespace string) (*IPAMConfig, error) { for idx := range n.IPAM.IPRanges { firstip, ipNet, err := net.ParseCIDR(n.IPAM.IPRanges[idx].Range) if err != nil { - return nil, fmt.Errorf("invalid CIDR %s: %v", n.IPAM.IPRanges[idx].Range, err) + return nil, fmt.Errorf("invalid CIDR %s: %w", n.IPAM.IPRanges[idx].Range, err) } n.IPAM.IPRanges[idx].Range = ipNet.String() if n.IPAM.IPRanges[idx].RangeStart == nil { @@ -158,7 +159,7 @@ func LoadFromNad(nadConfig string, nadNamespace string) (*IPAMConfig, error) { for idx := range n.IPAM.OmitRanges { _, _, err := net.ParseCIDR(n.IPAM.OmitRanges[idx]) if err != nil { - return nil, fmt.Errorf("invalid exclude CIDR %s: %v", n.IPAM.OmitRanges[idx], err) + return nil, fmt.Errorf("invalid exclude CIDR %s: %w", n.IPAM.OmitRanges[idx], err) } } @@ -231,7 +232,7 @@ func (i *IPAM) acquireORrelease(ctx context.Context, vmName string, vmNamespace case <-done: leCancel() case <-leCtx.Done(): - err = fmt.Errorf("context got timeout while waiting to become leader") + err = errors.New("context got timeout while waiting to become leader") } if err != nil { return ip, err @@ -241,7 +242,7 @@ func (i *IPAM) acquireORrelease(ctx context.Context, vmName string, vmNamespace // ip.String() returns string "" on errors in ip struct parsing or if *ip is nil if ip.String() == "" { - return ip, fmt.Errorf("something wrong, probably with leader election") + return ip, errors.New("something wrong, probably with leader election") } return ip, ipamerr @@ -264,7 +265,7 @@ func (i *IPAM) runIPAM(ctx context.Context, vmName string, vmNamespace string, a // Check connectivity 
to kubernetes if err := i.Status(ctxWithTimeout); err != nil { - return ip, fmt.Errorf("connectivity error: %v", err) + return ip, fmt.Errorf("connectivity error: %w", err) } // handle the ip add/del until successful @@ -283,12 +284,13 @@ func (i *IPAM) runIPAM(ctx context.Context, vmName string, vmNamespace string, a // read IPPool from ipppols.vm.neon.tech custom resource pool, err := i.getNeonvmIPPool(ctxWithTimeout, ipRange.Range) if err != nil { + //nolint:errorlint // spurious. Temporary is an interface, and doesn't work with errors.As if e, ok := err.(Temporary); ok && e.Temporary() { // retry attempt to read IPPool time.Sleep(DatastoreRetriesDelay) continue } - return ip, fmt.Errorf("error reading IP pool: %v", err) + return ip, fmt.Errorf("error reading IP pool: %w", err) } currentReservation := pool.Allocations(ctx) @@ -311,12 +313,13 @@ func (i *IPAM) runIPAM(ctx context.Context, vmName string, vmNamespace string, a // update IPPool with newReservation err = pool.Update(ctxWithTimeout, newReservation) if err != nil { + //nolint:errorlint // spurious. Temporary is an interface, and doesn't work with errors.As if e, ok := err.(Temporary); ok && e.Temporary() { // retry attempt to update IPPool time.Sleep(DatastoreRetriesDelay) continue } - return ip, fmt.Errorf("error updating IP pool: %v", err) + return ip, fmt.Errorf("error updating IP pool: %w", err) } // pool was read, acquire or release was processed, pool was updated // now we can break retry loop @@ -328,7 +331,7 @@ func (i *IPAM) runIPAM(ctx context.Context, vmName string, vmNamespace string, a } } if ip.IP == nil && action == Acquire { - return ip, fmt.Errorf("can not acquire IP, probably there are no space in IP pools") + return ip, errors.New("can not acquire IP, probably there are no space in IP pools") } return ip, ipamerr @@ -358,7 +361,7 @@ func (p *NeonvmIPPool) Allocations(ctx context.Context) []whereaboutstypes.IPRes } // getNeonvmIPPool returns a NeonVM IPPool for the given IP range -func (i *IPAM) getNeonvmIPPool(ctx context.Context, ipRange string) (NeonvmIPPool, error) { +func (i *IPAM) getNeonvmIPPool(ctx context.Context, ipRange string) (*NeonvmIPPool, error) { // for IP range 10.11.22.0/24 poll name will be // "10.11.22.0-24" if no network name in ipam spec, or // "samplenet-10.11.22.0-24" if nametwork name is `samplenet` @@ -370,35 +373,42 @@ func (i *IPAM) getNeonvmIPPool(ctx context.Context, ipRange string) (NeonvmIPPoo } pool, err := i.vmClient.NeonvmV1().IPPools(i.Config.NetworkNamespace).Get(ctx, poolName, metav1.GetOptions{}) - if err != nil && errors.IsNotFound(err) { + if err != nil && apierrors.IsNotFound(err) { // pool does not exist, create it - newPool := &neonvmapiv1.IPPool{} - newPool.ObjectMeta.Name = poolName - newPool.Spec.Range = ipRange - newPool.Spec.Allocations = make(map[string]neonvmapiv1.IPAllocation) + newPool := &neonvmapiv1.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: poolName, + Namespace: i.Config.NetworkNamespace, + }, + Spec: neonvmapiv1.IPPoolSpec{ + Range: ipRange, + Allocations: make(map[string]neonvmapiv1.IPAllocation), + }, + } _, err = i.vmClient.NeonvmV1().IPPools(i.Config.NetworkNamespace).Create(ctx, newPool, metav1.CreateOptions{}) - if err != nil && errors.IsAlreadyExists(err) { + if err != nil && apierrors.IsAlreadyExists(err) { // the pool was just created -- allow retry - return NeonvmIPPool{}, &temporaryError{err} + return nil, &temporaryError{err} } else if err != nil { - return NeonvmIPPool{}, err + return nil, err } // if the pool was created for the 
first time, trigger another retry of the allocation loop - return NeonvmIPPool{}, &temporaryError{fmt.Errorf("NeonvmIPPool was initialized")} + return nil, &temporaryError{errors.New("NeonvmIPPool was initialized")} } else if err != nil { - return NeonvmIPPool{}, err + return nil, err } // get first IP in the pool ip, _, err := net.ParseCIDR(pool.Spec.Range) if err != nil { - return NeonvmIPPool{}, err + return nil, err } - return NeonvmIPPool{ + return &NeonvmIPPool{ vmClient: i.Client.vmClient, pool: pool, - firstip: ip}, nil + firstip: ip, + }, nil } // Update NeonvmIPPool with new IP reservation @@ -406,7 +416,7 @@ func (p *NeonvmIPPool) Update(ctx context.Context, reservation []whereaboutstype p.pool.Spec.Allocations = toAllocations(reservation, p.firstip) _, err := p.vmClient.NeonvmV1().IPPools(p.pool.Namespace).Update(ctx, p.pool, metav1.UpdateOptions{}) if err != nil { - if errors.IsConflict(err) { + if apierrors.IsConflict(err) { return &temporaryError{err} } return err @@ -427,7 +437,12 @@ func toIPReservation(ctx context.Context, allocations map[string]neonvmapiv1.IPA continue } ip := whereaboutsallocate.IPAddOffset(firstip, uint64(numOffset)) - reservelist = append(reservelist, whereaboutstypes.IPReservation{IP: ip, ContainerID: a.ContainerID, PodRef: a.PodRef}) + reservelist = append(reservelist, whereaboutstypes.IPReservation{ + IP: ip, + ContainerID: a.ContainerID, + PodRef: a.PodRef, + IsAllocated: false, + }) } return reservelist } diff --git a/neonvm/runner/main.go b/neonvm/runner/main.go index 92a3f2810..1d0b0d0c5 100644 --- a/neonvm/runner/main.go +++ b/neonvm/runner/main.go @@ -1,26 +1,25 @@ package main import ( + "bytes" "context" + "crypto/sha256" "encoding/base64" + "encoding/hex" "encoding/json" "errors" - "io" - "net/http" - "strconv" - - "bytes" - "crypto/sha256" - "encoding/hex" "flag" "fmt" + "io" "math" "net" + "net/http" "os" "os/exec" "os/signal" "path/filepath" "regexp" + "strconv" "strings" "sync" "syscall" @@ -37,6 +36,7 @@ import ( "github.com/opencontainers/runtime-spec/specs-go" "github.com/vishvananda/netlink" "go.uber.org/zap" + "k8s.io/apimachinery/pkg/api/resource" vmv1 "github.com/neondatabase/autoscaling/neonvm/apis/neonvm/v1" @@ -199,7 +199,7 @@ func createISO9660runtime(diskPath string, command, args, sysctl []string, env [ if err != nil { return err } - defer writer.Cleanup() + defer writer.Cleanup() //nolint:errcheck // Nothing to do with the error, maybe log it ? TODO if len(sysctl) != 0 { err = writer.AddFile(bytes.NewReader([]byte(strings.Join(sysctl, "\n"))), "sysctl.conf") @@ -372,7 +372,7 @@ func createISO9660FromPath(logger *zap.Logger, diskName string, diskPath string, if err != nil { return err } - defer writer.Cleanup() + defer writer.Cleanup() //nolint:errcheck // Nothing to do with the error, maybe log it ? TODO dir, err := os.Open(contentPath) if err != nil { @@ -407,14 +407,18 @@ func createISO9660FromPath(logger *zap.Logger, diskName string, diskPath string, continue } - logger.Info("adding file to ISO9660 disk", zap.String("path", outputPath)) - fileToAdd, err := os.Open(fileName) - if err != nil { - return err - } - defer fileToAdd.Close() + // run the file handling logic in a closure, so the defers happen within the loop body, + // rather than the outer function. 
+ err = func() error { + logger.Info("adding file to ISO9660 disk", zap.String("path", outputPath)) + fileToAdd, err := os.Open(fileName) + if err != nil { + return err + } + defer fileToAdd.Close() - err = writer.AddFile(fileToAdd, outputPath) + return writer.AddFile(fileToAdd, outputPath) + }() if err != nil { return err } @@ -494,8 +498,8 @@ func main() { if err := json.Unmarshal(vmSpecJson, vmSpec); err != nil { logger.Fatal("Failed to unmarshal VM spec", zap.Error(err)) } - vmStatus := &vmv1.VirtualMachineStatus{} - if err := json.Unmarshal(vmStatusJson, vmStatus); err != nil { + var vmStatus vmv1.VirtualMachineStatus + if err := json.Unmarshal(vmStatusJson, &vmStatus); err != nil { logger.Fatal("Failed to unmarshal VM Status", zap.Error(err)) } @@ -532,8 +536,10 @@ func main() { if err != nil { logger.Fatal("could not get root image size", zap.Error(err)) } - imageSize := QemuImgOutputPartial{} - json.Unmarshal(qemuImgOut, &imageSize) + var imageSize QemuImgOutputPartial + if err := json.Unmarshal(qemuImgOut, &imageSize); err != nil { + logger.Fatal("Failed to unmarhsal QEMU image size", zap.Error(err)) + } imageSizeQuantity := resource.NewQuantity(imageSize.VirtualSize, resource.BinarySI) // going to resize @@ -698,9 +704,8 @@ func handleCPUChange(logger *zap.Logger, w http.ResponseWriter, r *http.Request, return } - parsed := api.VCPUChange{} - err = json.Unmarshal(body, &parsed) - if err != nil { + var parsed api.VCPUChange + if err = json.Unmarshal(body, &parsed); err != nil { logger.Error("could not parse body", zap.Error(err)) w.WriteHeader(400) return @@ -740,7 +745,7 @@ func handleCPUCurrent(logger *zap.Logger, w http.ResponseWriter, r *http.Request } w.Header().Add("Content-Type", "application/json") - w.Write(body) + w.Write(body) //nolint:errcheck // Not much to do with the error here. TODO: log it? } func listenForCPUChanges(ctx context.Context, logger *zap.Logger, port int32, cgroupPath string, wg *sync.WaitGroup) { @@ -944,7 +949,7 @@ func getCgroupQuota(cgroupPath string) (*vmv1.MilliCPU, error) { arr := strings.Split(strings.Trim(string(data), "\n"), " ") if len(arr) == 0 { - return nil, fmt.Errorf("unexpected cgroup data") + return nil, errors.New("unexpected cgroup data") } quota, err := strconv.ParseUint(arr[0], 10, 64) if err != nil { @@ -1004,7 +1009,7 @@ func terminateQemuOnSigterm(ctx context.Context, logger *zap.Logger, wg *sync.Wa logger.Error("failed to start monitor connection", zap.Error(err)) return } - defer mon.Disconnect() + defer mon.Disconnect() //nolint:errcheck // nothing to do with error when deferred. TODO: log it? 
qmpcmd := []byte(`{"execute": "system_powerdown"}`) _, err = mon.Run(qmpcmd) @@ -1014,8 +1019,6 @@ func terminateQemuOnSigterm(ctx context.Context, logger *zap.Logger, wg *sync.Wa } logger.Info("system_powerdown command sent to QEMU") - - return } func calcIPs(cidr string) (net.IP, net.IP, net.IPMask, error) { @@ -1138,7 +1141,7 @@ func defaultNetwork(logger *zap.Logger, cidr string, ports []vmv1.Port) (mac.MAC } // pass incoming traffic to .Guest.Spec.Ports into VM - iptablesArgs := []string{} + var iptablesArgs []string for _, port := range ports { logger.Info(fmt.Sprintf("setup DNAT rule for incoming traffic to port %d", port.Port)) iptablesArgs = []string{ @@ -1147,7 +1150,7 @@ func defaultNetwork(logger *zap.Logger, cidr string, ports []vmv1.Port) (mac.MAC "-j", "DNAT", "--to", fmt.Sprintf("%s:%d", ipVm.String(), port.Port), } if err := execFg("iptables", iptablesArgs...); err != nil { - logger.Error("could not set up DNAT rule for incoming traffic", zap.Error(err)) + logger.Error("could not set up DNAT rule for incoming traffic", zap.Error(err)) return nil, err } logger.Info(fmt.Sprintf("setup DNAT rule for traffic originating from localhost to port %d", port.Port)) @@ -1173,7 +1176,7 @@ func defaultNetwork(logger *zap.Logger, cidr string, ports []vmv1.Port) (mac.MAC return nil, err } } - logger.Info(fmt.Sprintf("setup MASQUERADE rule for traffic originating from localhost")) + logger.Info("setup MASQUERADE rule for traffic originating from localhost") iptablesArgs = []string{ "-t", "nat", "-A", "POSTROUTING", "-m", "addrtype", "--src-type", "LOCAL", "--dst-type", "UNICAST", diff --git a/neonvm/tools/vm-builder/main.go b/neonvm/tools/vm-builder/main.go index 4b2d538ac..0b8c47950 100644 --- a/neonvm/tools/vm-builder/main.go +++ b/neonvm/tools/vm-builder/main.go @@ -56,11 +56,6 @@ var ( version = flag.Bool("version", false, `Print vm-builder version`) ) -type dockerMessage struct { - Stream string `json:"stream"` - Error string `json:"error"` -} - func AddTemplatedFileToTar(tw *tar.Writer, tmplArgs any, filename string, tmplString string) error { tmpl, err := template.New(filename).Parse(tmplString) if err != nil { @@ -155,7 +150,7 @@ func main() { if !*forcePull { hostImages, err := cli.ImageList(ctx, types.ImageListOptions{}) if err != nil { - log.Fatalln(err) + log.Fatalln(err) //nolint:gocritic // linter complains that Fatalln circumvents deferred cli.Close(). Too much work to fix in #721, leaving for later. 
} for _, img := range hostImages { @@ -173,15 +168,22 @@ func main() { if !hostContainsSrcImage { // pull source image - log.Printf("Pull source docker image: %s", *srcImage) - pull, err := cli.ImagePull(ctx, *srcImage, types.ImagePullOptions{}) + // use a closure so deferred close is closer + err := func() error { + log.Printf("Pull source docker image: %s", *srcImage) + pull, err := cli.ImagePull(ctx, *srcImage, types.ImagePullOptions{}) + if err != nil { + return err + } + defer pull.Close() + // do quiet pull - discard output + _, err = io.Copy(io.Discard, pull) + return err + }() if err != nil { log.Fatalln(err) } - defer pull.Close() - // do quiet pull - discard output - io.Copy(io.Discard, pull) } log.Printf("Build docker image for virtual machine (disk size %s): %s\n", *size, dstIm) @@ -200,16 +202,20 @@ func main() { } tmplArgs := TemplatesContext{ + User: "root", // overridden below, if imageSpec.Config.User != "" Entrypoint: imageSpec.Config.Entrypoint, Cmd: imageSpec.Config.Cmd, Env: imageSpec.Config.Env, RootDiskImage: *srcImage, + + SpecBuild: "", // overridden below if spec != nil + SpecMerge: "", // overridden below if spec != nil + InittabCommands: nil, // overridden below if spec != nil + ShutdownHook: "", // overridden below if spec != nil } if len(imageSpec.Config.User) != 0 { tmplArgs.User = imageSpec.Config.User - } else { - tmplArgs.User = "root" } tarBuffer := new(bytes.Buffer) @@ -326,7 +332,7 @@ func main() { tarReader := tar.NewReader(fromContainer) for { header, err := tarReader.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } else if err != nil { log.Fatalln(err) @@ -336,15 +342,19 @@ func main() { log.Printf("skip file %s", header.Name) continue } - path := filepath.Join(*outFile) + path := filepath.Join(*outFile) //nolint:gocritic // FIXME: this is probably incorrect, intended to join with header.Name ? info := header.FileInfo() - file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode()) - if err != nil { - log.Fatalln(err) - } - defer file.Close() - _, err = io.Copy(file, tarReader) + // Open and write to the file inside a closure, so we can defer close + err = func() error { + file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode()) + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, tarReader) + return err + }() if err != nil { log.Fatalln(err) } diff --git a/neonvm/tools/vxlan/controller/main.go b/neonvm/tools/vxlan/controller/main.go index 3766df852..53afab2ae 100644 --- a/neonvm/tools/vxlan/controller/main.go +++ b/neonvm/tools/vxlan/controller/main.go @@ -30,7 +30,7 @@ const ( ) var ( - delete = flag.Bool("delete", false, `delete VXLAN interfaces`) + deleteIfaces = flag.Bool("delete", false, `delete VXLAN interfaces`) ) func main() { @@ -49,7 +49,7 @@ func main() { } // -delete option used for teardown vxlan setup - if *delete { + if *deleteIfaces { log.Printf("deleting vxlan interface %s", VXLAN_IF_NAME) if err := deleteLink(VXLAN_IF_NAME); err != nil { log.Print(err) @@ -125,7 +125,7 @@ func createBrigeInterface(name string) error { log.Printf("link with name %s already found", name) return nil } - _, notFound := err.(netlink.LinkNotFoundError) + _, notFound := err.(netlink.LinkNotFoundError) //nolint:errorlint // errors.Is doesn't work, we actually just want to know the type. 
if !notFound { return err } @@ -154,7 +154,7 @@ func createVxlanInterface(name string, vxlanID int, ownIP string, bridgeName str log.Printf("link with name %s already found", name) return nil } - _, notFound := err.(netlink.LinkNotFoundError) + _, notFound := err.(netlink.LinkNotFoundError) //nolint:errorlint // errors.Is doesn't work, we actually just want to know the type. if !notFound { return err } @@ -231,7 +231,7 @@ func deleteLink(name string) error { log.Printf("link with name %s was deleted", name) return nil } - _, notFound := err.(netlink.LinkNotFoundError) + _, notFound := err.(netlink.LinkNotFoundError) //nolint:errorlint // errors.Is doesn't work, we actually just want to know the type. if !notFound { return err }
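A recurring pattern in the hunks above (createISO9660FromPath in neonvm/runner/main.go and the image-pull and tar-extraction loops in neonvm/tools/vm-builder/main.go) is moving per-iteration resource handling into a closure so that `defer` fires at the end of each loop iteration rather than accumulating until the enclosing function returns. A minimal, self-contained sketch of that pattern follows; the file paths and the per-file work are hypothetical, not taken from the patch.

```go
// Minimal sketch of the defer-in-closure pattern used in the runner and
// vm-builder changes above; paths and per-file processing are hypothetical.
package main

import (
	"fmt"
	"io"
	"os"
)

func processFiles(paths []string) error {
	for _, p := range paths {
		// Running the body in a closure scopes the defer to one iteration,
		// so each file is closed before the next one is opened.
		err := func() error {
			f, err := os.Open(p)
			if err != nil {
				return err
			}
			defer f.Close()

			_, err = io.Copy(io.Discard, f) // stand-in for real per-file work
			return err
		}()
		if err != nil {
			return fmt.Errorf("processing %s: %w", p, err)
		}
	}
	return nil
}

func main() {
	if err := processFiles(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```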