diff --git a/changes/unreleased/Fixed-20240213-111303.yaml b/changes/unreleased/Fixed-20240213-111303.yaml
new file mode 100644
index 000000000..e38c19fb3
--- /dev/null
+++ b/changes/unreleased/Fixed-20240213-111303.yaml
@@ -0,0 +1,5 @@
+kind: Fixed
+body: Resolves an issue where the Istio proxy sidecar is injected as the first container.
+time: 2024-02-13T11:13:03.768289581-04:00
+custom:
+  Issue: "702"
diff --git a/pkg/builder/builder_test.go b/pkg/builder/builder_test.go
index 20127c659..f35a715dd 100644
--- a/pkg/builder/builder_test.go
+++ b/pkg/builder/builder_test.go
@@ -22,8 +22,8 @@ import (
 	. "github.com/onsi/gomega"
 	vapi "github.com/vertica/vertica-kubernetes/api/v1"
 	vmeta "github.com/vertica/vertica-kubernetes/pkg/meta"
-	"github.com/vertica/vertica-kubernetes/pkg/names"
 	"github.com/vertica/vertica-kubernetes/pkg/paths"
+	"github.com/vertica/vertica-kubernetes/pkg/vk8s"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -271,9 +271,10 @@ var _ = Describe("builder", func() {
 		vdb.Annotations[vmeta.VClusterOpsAnnotation] = vmeta.VClusterOpsAnnotationTrue
 		c := buildPodSpec(vdb, &vdb.Spec.Subclusters[0])
-		inx := names.GetServerContainerIndex(vdb)
-		Ω(c.Containers[inx].ReadinessProbe.HTTPGet).ShouldNot(BeNil())
-		Ω(c.Containers[inx].LivenessProbe.HTTPGet).ShouldNot(BeNil())
+		svrCnt := vk8s.GetServerContainer(c.Containers)
+		Ω(svrCnt).ShouldNot(BeNil())
+		Ω(svrCnt.ReadinessProbe.HTTPGet).ShouldNot(BeNil())
+		Ω(svrCnt.LivenessProbe.HTTPGet).ShouldNot(BeNil())
 
 		vdb.Spec.ReadinessProbeOverride = &v1.Probe{
 			ProbeHandler: v1.ProbeHandler{
@@ -290,10 +291,12 @@ var _ = Describe("builder", func() {
 			},
 		}
 		c = buildPodSpec(vdb, &vdb.Spec.Subclusters[0])
-		Ω(c.Containers[inx].ReadinessProbe.HTTPGet).Should(BeNil())
-		Ω(c.Containers[inx].LivenessProbe.HTTPGet).Should(BeNil())
-		Ω(c.Containers[inx].ReadinessProbe.Exec).ShouldNot(BeNil())
-		Ω(c.Containers[inx].LivenessProbe.TCPSocket).ShouldNot(BeNil())
+		svrCnt = vk8s.GetServerContainer(c.Containers)
+		Ω(svrCnt).ShouldNot(BeNil())
+		Ω(svrCnt.ReadinessProbe.HTTPGet).Should(BeNil())
+		Ω(svrCnt.LivenessProbe.HTTPGet).Should(BeNil())
+		Ω(svrCnt.ReadinessProbe.Exec).ShouldNot(BeNil())
+		Ω(svrCnt.LivenessProbe.TCPSocket).ShouldNot(BeNil())
 	})
 
 	It("should not use canary query probe if using GSM", func() {
diff --git a/pkg/controllers/vdb/obj_reconciler.go b/pkg/controllers/vdb/obj_reconciler.go
index 85c655aef..c649102ad 100644
--- a/pkg/controllers/vdb/obj_reconciler.go
+++ b/pkg/controllers/vdb/obj_reconciler.go
@@ -33,6 +33,7 @@ import (
 	vmeta "github.com/vertica/vertica-kubernetes/pkg/meta"
 	"github.com/vertica/vertica-kubernetes/pkg/names"
 	"github.com/vertica/vertica-kubernetes/pkg/paths"
+	"github.com/vertica/vertica-kubernetes/pkg/vk8s"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -412,13 +413,7 @@ func (o *ObjReconciler) reconcileSts(ctx context.Context, sc *vapi.Subcluster) (
 	err := o.VRec.Client.Get(ctx, nm, curSts)
 	if err != nil && errors.IsNotFound(err) {
 		o.Log.Info("Creating statefulset", "Name", nm, "Size", expSts.Spec.Replicas, "Image", expSts.Spec.Template.Spec.Containers[0].Image)
-		err = ctrl.SetControllerReference(o.Vdb, expSts, o.VRec.Scheme)
-		if err != nil {
-			return ctrl.Result{}, err
-		}
-		// Invalidate the pod facts cache since we are creating a new sts
-		o.PFacts.Invalidate()
-		return ctrl.Result{}, o.VRec.Client.Create(ctx, expSts)
+		return ctrl.Result{}, o.createSts(ctx, expSts)
 	}
 
 	// We can only remove pods if we have called remove node and done the
@@ -434,13 +429,20 @@ func (o *ObjReconciler) reconcileSts(ctx context.Context, sc *vapi.Subcluster) (
 	// We always preserve the image. This is done because during upgrade, the
 	// image is changed outside of this reconciler. It is done through a
 	// separate update to the sts.
-	i := names.GetServerContainerIndex(o.Vdb)
-	// It does not matter which is the first container,
-	// they have the same image
-	curImage := curSts.Spec.Template.Spec.Containers[0].Image
-	expSts.Spec.Template.Spec.Containers[i].Image = curImage
-	if o.Vdb.IsNMASideCarDeploymentEnabled() {
-		expSts.Spec.Template.Spec.Containers[names.GetNMAContainerIndex()].Image = curImage
+	//
+	// Both the NMA and server container have the same image, but the server
+	// container is guaranteed to be there for all deployments.
+	curImage, err := vk8s.GetServerImage(curSts.Spec.Template.Spec.Containers)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	expSvrCnt := vk8s.GetServerContainer(expSts.Spec.Template.Spec.Containers)
+	if expSvrCnt == nil {
+		return ctrl.Result{}, fmt.Errorf("could not find server container in sts %s", expSts.Name)
+	}
+	expSvrCnt.Image = curImage
+	if expNMACnt := vk8s.GetNMAContainer(expSts.Spec.Template.Spec.Containers); expNMACnt != nil {
+		expNMACnt.Image = curImage
 	}
 
 	// Preserve scaling if told to do so. This is used when doing early
@@ -465,15 +467,33 @@ func (o *ObjReconciler) reconcileSts(ctx context.Context, sc *vapi.Subcluster) (
 	// drop the old sts and create a fresh one.
 	if isNMADeploymentDifferent(curSts, expSts) {
 		o.Log.Info("Dropping then recreating statefulset", "Name", expSts.Name)
-		if err := o.VRec.Client.Delete(ctx, curSts); err != nil {
-			return ctrl.Result{}, err
-		}
-		if err := ctrl.SetControllerReference(o.Vdb, expSts, o.VRec.Scheme); err != nil {
-			return ctrl.Result{}, err
-		}
-		return ctrl.Result{}, o.VRec.Client.Create(ctx, expSts)
+		return ctrl.Result{}, o.recreateSts(ctx, curSts, expSts)
+	}
+
+	return ctrl.Result{}, o.updateSts(ctx, curSts, expSts)
+}
+
+// recreateSts will drop then create the statefulset
+func (o *ObjReconciler) recreateSts(ctx context.Context, curSts, expSts *appsv1.StatefulSet) error {
+	if err := o.VRec.Client.Delete(ctx, curSts); err != nil {
+		return err
+	}
+	return o.createSts(ctx, expSts)
+}
+
+// createSts will create a new sts. It assumes the statefulset doesn't already exist.
+func (o *ObjReconciler) createSts(ctx context.Context, expSts *appsv1.StatefulSet) error {
+	err := ctrl.SetControllerReference(o.Vdb, expSts, o.VRec.Scheme)
+	if err != nil {
+		return err
 	}
+	// Invalidate the pod facts cache since we are creating a new sts
+	o.PFacts.Invalidate()
+	return o.VRec.Client.Create(ctx, expSts)
+}
 
+// updateSts will patch an existing statefulset.
+func (o *ObjReconciler) updateSts(ctx context.Context, curSts, expSts *appsv1.StatefulSet) error {
 	// Update the sts by patching in fields that changed according to expSts.
 	// Due to the omission of default fields in expSts, curSts != expSts.  We
 	// always send a patch request, then compare what came back against origSts
@@ -484,15 +504,14 @@ func (o *ObjReconciler) reconcileSts(ctx context.Context, sc *vapi.Subcluster) (
 	expSts.Spec.DeepCopyInto(&curSts.Spec)
 	curSts.Labels = expSts.Labels
 	if err := o.VRec.Client.Patch(ctx, curSts, patch); err != nil {
-		return ctrl.Result{}, err
+		return err
 	}
 	if !reflect.DeepEqual(curSts.Spec, origSts.Spec) {
 		o.Log.Info("Patching statefulset", "Name", expSts.Name, "Image", expSts.Spec.Template.Spec.Containers[0].Image)
 		// Invalidate the pod facts cache since we are about to change the sts
 		o.PFacts.Invalidate()
-		return ctrl.Result{}, nil
 	}
-	return ctrl.Result{}, nil
+	return nil
 }
 
 // isNMADeploymentDifferent will return true if one of the statefulsets have a
diff --git a/pkg/controllers/vdb/offlineupgrade_reconciler.go b/pkg/controllers/vdb/offlineupgrade_reconciler.go
index 0dc6fc1d7..7525622d6 100644
--- a/pkg/controllers/vdb/offlineupgrade_reconciler.go
+++ b/pkg/controllers/vdb/offlineupgrade_reconciler.go
@@ -27,9 +27,9 @@ import (
 	verrors "github.com/vertica/vertica-kubernetes/pkg/errors"
 	"github.com/vertica/vertica-kubernetes/pkg/events"
 	"github.com/vertica/vertica-kubernetes/pkg/iter"
-	"github.com/vertica/vertica-kubernetes/pkg/names"
 	"github.com/vertica/vertica-kubernetes/pkg/vadmin"
 	"github.com/vertica/vertica-kubernetes/pkg/vadmin/opts/stopdb"
+	"github.com/vertica/vertica-kubernetes/pkg/vk8s"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -252,8 +252,11 @@ func (o *OfflineUpgradeReconciler) checkForNewPods(ctx context.Context) (ctrl.Re
 	}
 	for i := range pods.Items {
 		pod := &pods.Items[i]
-		cnts := pod.Spec.Containers
-		if cnts[names.GetFirstContainerIndex()].Image == o.Vdb.Spec.Image {
+		cntImage, err := vk8s.GetServerImage(pod.Spec.Containers)
+		if err != nil {
+			return ctrl.Result{}, err
+		}
+		if cntImage == o.Vdb.Spec.Image {
 			foundPodWithNewImage = true
 			break
 		}
@@ -334,8 +337,11 @@ func (o *OfflineUpgradeReconciler) anyPodsRunningWithOldImage(ctx context.Contex
 		if errors.IsNotFound(err) {
 			continue
 		}
-		cnts := pod.Spec.Containers
-		if cnts[names.GetFirstContainerIndex()].Image != o.Vdb.Spec.Image {
+		cntImage, err := vk8s.GetServerImage(pod.Spec.Containers)
+		if err != nil {
+			return false, err
+		}
+		if cntImage != o.Vdb.Spec.Image {
 			return true, nil
 		}
 	}
diff --git a/pkg/controllers/vdb/offlineupgrade_reconciler_test.go b/pkg/controllers/vdb/offlineupgrade_reconciler_test.go
index 18c880bde..872c33a80 100644
--- a/pkg/controllers/vdb/offlineupgrade_reconciler_test.go
+++ b/pkg/controllers/vdb/offlineupgrade_reconciler_test.go
@@ -26,6 +26,7 @@ import (
 	"github.com/vertica/vertica-kubernetes/pkg/iter"
 	"github.com/vertica/vertica-kubernetes/pkg/names"
 	"github.com/vertica/vertica-kubernetes/pkg/test"
+	"github.com/vertica/vertica-kubernetes/pkg/vk8s"
 	appsv1 "k8s.io/api/apps/v1"
 	ctrl "sigs.k8s.io/controller-runtime"
 )
@@ -45,7 +46,7 @@ var _ = Describe("offlineupgrade_reconcile", func() {
 
 		sts := &appsv1.StatefulSet{}
 		Expect(k8sClient.Get(ctx, names.GenStsName(vdb, &vdb.Spec.Subclusters[0]), sts)).Should(Succeed())
-		Expect(sts.Spec.Template.Spec.Containers[names.GetFirstContainerIndex()].Image).ShouldNot(Equal(NewImage))
+		Expect(vk8s.GetServerImage(sts.Spec.Template.Spec.Containers)).ShouldNot(Equal(NewImage))
 
 		updateVdbToCauseUpgrade(ctx, vdb, NewImage)
 
@@ -53,7 +54,7 @@
 		Expect(r.Reconcile(ctx, &ctrl.Request{})).Should(Equal(ctrl.Result{Requeue: false, RequeueAfter: vdb.GetUpgradeRequeueTimeDuration()}))
 		Expect(k8sClient.Get(ctx, names.GenStsName(vdb, &vdb.Spec.Subclusters[0]), sts)).Should(Succeed())
-		Expect(sts.Spec.Template.Spec.Containers[names.GetFirstContainerIndex()].Image).Should(Equal(NewImage))
+		Expect(vk8s.GetServerImage(sts.Spec.Template.Spec.Containers)).Should(Equal(NewImage))
 	})
 
 	It("should stop cluster during an upgrade", func() {
diff --git a/pkg/controllers/vdb/onlineupgrade_reconciler.go b/pkg/controllers/vdb/onlineupgrade_reconciler.go
index 55b5b3ca1..7b1bcb7cb 100644
--- a/pkg/controllers/vdb/onlineupgrade_reconciler.go
+++ b/pkg/controllers/vdb/onlineupgrade_reconciler.go
@@ -32,6 +32,7 @@ import (
 	vmeta "github.com/vertica/vertica-kubernetes/pkg/meta"
 	"github.com/vertica/vertica-kubernetes/pkg/names"
 	"github.com/vertica/vertica-kubernetes/pkg/vadmin"
+	"github.com/vertica/vertica-kubernetes/pkg/vk8s"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -411,7 +412,10 @@ func (o *OnlineUpgradeReconciler) isMatchingSubclusterType(sts *appsv1.StatefulS
 // drainSubcluster will reroute traffic away from a subcluster and wait for it to be idle.
 // This is a no-op if the image has already been updated for the subcluster.
 func (o *OnlineUpgradeReconciler) drainSubcluster(ctx context.Context, sts *appsv1.StatefulSet) (ctrl.Result, error) {
-	img := sts.Spec.Template.Spec.Containers[names.GetFirstContainerIndex()].Image
+	img, err := vk8s.GetServerImage(sts.Spec.Template.Spec.Containers)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
 
 	if img != o.Vdb.Spec.Image {
 		scName := sts.Labels[vmeta.SubclusterNameLabel]
@@ -542,7 +546,10 @@ func (o *OnlineUpgradeReconciler) waitForReadOnly(_ context.Context, sts *appsv1
 	if o.PFacts.countUpPrimaryNodes() != 0 {
 		return ctrl.Result{}, nil
 	}
-	newImage := sts.Spec.Template.Spec.Containers[names.GetFirstContainerIndex()].Image
+	newImage, err := vk8s.GetServerImage(sts.Spec.Template.Spec.Containers)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
 	// If all the pods that are running the old image are read-only we are done
 	// our wait.
 	if o.PFacts.countNotReadOnlyWithOldImage(newImage) == 0 {
@@ -661,7 +668,10 @@ func (o *OnlineUpgradeReconciler) cachePrimaryImages(ctx context.Context) error
 	for i := range stss.Items {
 		sts := &stss.Items[i]
 		if sts.Labels[vmeta.SubclusterTypeLabel] == vapi.PrimarySubcluster {
-			img := sts.Spec.Template.Spec.Containers[names.GetFirstContainerIndex()].Image
+			img, err := vk8s.GetServerImage(sts.Spec.Template.Spec.Containers)
+			if err != nil {
+				return err
+			}
 			imageFound := false
 			for j := range o.PrimaryImages {
 				imageFound = o.PrimaryImages[j] == img
diff --git a/pkg/controllers/vdb/onlineupgrade_reconciler_test.go b/pkg/controllers/vdb/onlineupgrade_reconciler_test.go
index b795695ca..3df284767 100644
--- a/pkg/controllers/vdb/onlineupgrade_reconciler_test.go
+++ b/pkg/controllers/vdb/onlineupgrade_reconciler_test.go
@@ -27,6 +27,7 @@ import (
 	vmeta "github.com/vertica/vertica-kubernetes/pkg/meta"
 	"github.com/vertica/vertica-kubernetes/pkg/names"
 	"github.com/vertica/vertica-kubernetes/pkg/test"
+	"github.com/vertica/vertica-kubernetes/pkg/vk8s"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -333,9 +334,9 @@ var _ = Describe("onlineupgrade_reconcile", func() {
 
 		sts := &appsv1.StatefulSet{}
 		Expect(k8sClient.Get(ctx, names.GenStsName(vdb, &vdb.Spec.Subclusters[0]), sts)).Should(Succeed())
-		Expect(sts.Spec.Template.Spec.Containers[names.GetFirstContainerIndex()].Image).Should(Equal(NewImageName))
+		Expect(vk8s.GetServerImage(sts.Spec.Template.Spec.Containers)).Should(Equal(NewImageName))
 		Expect(k8sClient.Get(ctx, names.GenStsName(vdb, &vdb.Spec.Subclusters[1]), sts)).Should(Succeed())
-		Expect(sts.Spec.Template.Spec.Containers[names.GetFirstContainerIndex()].Image).Should(Equal(NewImageName))
+		Expect(vk8s.GetServerImage(sts.Spec.Template.Spec.Containers)).Should(Equal(NewImageName))
 	})
 
 	It("should have an upgradeStatus set when it fails part way through", func() {
diff --git a/pkg/controllers/vdb/podfacts.go b/pkg/controllers/vdb/podfacts.go
index fc82f9753..b732b7e2a 100644
--- a/pkg/controllers/vdb/podfacts.go
+++ b/pkg/controllers/vdb/podfacts.go
@@ -34,6 +34,7 @@ import (
 	vmeta "github.com/vertica/vertica-kubernetes/pkg/meta"
 	"github.com/vertica/vertica-kubernetes/pkg/names"
 	"github.com/vertica/vertica-kubernetes/pkg/paths"
+	"github.com/vertica/vertica-kubernetes/pkg/vk8s"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
@@ -345,9 +346,15 @@ func (p *PodFacts) collectPodByStsIndex(ctx context.Context, vdb *vapi.VerticaDB
 	pf.isTransient, _ = strconv.ParseBool(pod.Labels[vmeta.SubclusterTransientLabel])
 	pf.isPendingDelete = podIndex >= sc.Size
 	// Let's just pick the first container image
-	pf.image = pod.Spec.Containers[names.GetFirstContainerIndex()].Image
+	pf.image, err = vk8s.GetServerImage(pod.Spec.Containers)
+	if err != nil {
+		return err
+	}
 	pf.hasDCTableAnnotations = p.checkDCTableAnnotations(pod)
-	pf.catalogPath = p.getCatalogPathFromPod(vdb, pod)
+	pf.catalogPath, err = p.getCatalogPathFromPod(vdb, pod)
+	if err != nil {
+		return err
+	}
 	pf.hasNMASidecar = builder.HasNMAContainer(&pod.Spec)
 }
 
@@ -689,30 +696,33 @@ func (p *PodFacts) checkDCTableAnnotations(pod *corev1.Pod) bool {
 }
 
 // getCatalogPathFromPod will get the current catalog path from the pod
-func (p *PodFacts) getCatalogPathFromPod(vdb *vapi.VerticaDB, pod *corev1.Pod) string {
+func (p *PodFacts) getCatalogPathFromPod(vdb *vapi.VerticaDB, pod *corev1.Pod) (string, error) {
 	// both server and nma(if sidecar deployment enabled) have
-	// the catalog path env set so we can pick either to get it
-	index := names.GetFirstContainerIndex()
-	return p.getEnvValueFromPodWithDefault(pod, index,
-		builder.CatalogPathEnv, vdb.Spec.Local.GetCatalogPath())
+	// the catalog path env set so we just pick the server since that container
+	// will be available in all deployments.
+	cnt := vk8s.GetServerContainer(pod.Spec.Containers)
+	if cnt == nil {
+		return "", fmt.Errorf("could not find the server container in the pod %s", pod.Name)
+	}
+	return p.getEnvValueFromPodWithDefault(cnt,
+		builder.CatalogPathEnv, vdb.Spec.Local.GetCatalogPath()), nil
 }
 
 // getEnvValueFromPodWithDefault will get an environment value from the pod. A default
 // value is used if the env var isn't found.
-func (p *PodFacts) getEnvValueFromPodWithDefault(pod *corev1.Pod, cntIndex int,
+func (p *PodFacts) getEnvValueFromPodWithDefault(cnt *corev1.Container,
 	envName, defaultValue string) string {
-	pathPrefix, ok := p.getEnvValueFromPod(pod, cntIndex, envName)
+	pathPrefix, ok := p.getEnvValueFromPod(cnt, envName)
 	if !ok {
 		return defaultValue
 	}
 	return pathPrefix
 }
 
-func (p *PodFacts) getEnvValueFromPod(pod *corev1.Pod, index int, envName string) (string, bool) {
-	c := pod.Spec.Containers[index]
-	for i := range c.Env {
-		if c.Env[i].Name == envName {
-			return c.Env[i].Value, true
+func (p *PodFacts) getEnvValueFromPod(cnt *corev1.Container, envName string) (string, bool) {
+	for i := range cnt.Env {
+		if cnt.Env[i].Name == envName {
+			return cnt.Env[i].Value, true
 		}
 	}
 	return "", false
diff --git a/pkg/controllers/vdb/restart_reconciler.go b/pkg/controllers/vdb/restart_reconciler.go
index 2a12799f7..ffc05f751 100644
--- a/pkg/controllers/vdb/restart_reconciler.go
+++ b/pkg/controllers/vdb/restart_reconciler.go
@@ -38,6 +38,7 @@ import (
 	"github.com/vertica/vertica-kubernetes/pkg/vadmin/opts/restartnode"
 	"github.com/vertica/vertica-kubernetes/pkg/vadmin/opts/startdb"
 	"github.com/vertica/vertica-kubernetes/pkg/vdbstatus"
+	"github.com/vertica/vertica-kubernetes/pkg/vk8s"
 	corev1 "k8s.io/api/core/v1"
 	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -597,8 +598,11 @@ func (r *RestartReconciler) makeResultForLivenessProbeWait(ctx context.Context)
 		}
 		return ctrl.Result{}, err
 	}
-	cnts := pod.Spec.Containers
-	probe := cnts[names.GetServerContainerIndexInSlice(cnts)].LivenessProbe
+	svrCnt := vk8s.GetServerContainer(pod.Spec.Containers)
+	if svrCnt == nil {
+		return ctrl.Result{}, fmt.Errorf("could not find server container for pod %s", pod.Name)
+	}
+	probe := svrCnt.LivenessProbe
 	if probe == nil {
 		// For backwards compatibility, if the probe isn't set, then we just
 		// return a simple requeue with exponential backoff.
@@ -621,9 +625,11 @@ func (r *RestartReconciler) isStartupProbeActive(ctx context.Context, nm types.N
 	}
 	// If the pod doesn't have a livenessProbe then we always return true. This
 	// can happen if we are in the middle of upgrading the operator.
-	cnts := pod.Spec.Containers
-	probe := cnts[names.GetServerContainerIndexInSlice(cnts)].LivenessProbe
-	if probe == nil {
+	svrCnt := vk8s.GetServerContainer(pod.Spec.Containers)
+	if svrCnt == nil {
+		return false, fmt.Errorf("could not find server container for pod %s", nm.Name)
+	}
+	if svrCnt.LivenessProbe == nil {
 		r.Log.Info("Pod doesn't have a livenessProbe. Okay to restart", "pod", nm)
 		return true, nil
 	}
diff --git a/pkg/controllers/vdb/upgrade.go b/pkg/controllers/vdb/upgrade.go
index 1c4205c91..1d9a1eb8f 100644
--- a/pkg/controllers/vdb/upgrade.go
+++ b/pkg/controllers/vdb/upgrade.go
@@ -29,6 +29,7 @@ import (
 	"github.com/vertica/vertica-kubernetes/pkg/metrics"
 	"github.com/vertica/vertica-kubernetes/pkg/names"
 	"github.com/vertica/vertica-kubernetes/pkg/vdbstatus"
+	"github.com/vertica/vertica-kubernetes/pkg/vk8s"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -101,8 +102,11 @@ func (i *UpgradeManager) isVDBImageDifferent(ctx context.Context) (bool, error)
 	}
 	for inx := range stss.Items {
 		sts := stss.Items[inx]
-		cnts := sts.Spec.Template.Spec.Containers
-		if cnts[names.GetFirstContainerIndex()].Image != i.Vdb.Spec.Image {
+		cntImage, err := vk8s.GetServerImage(sts.Spec.Template.Spec.Containers)
+		if err != nil {
+			return false, err
+		}
+		if cntImage != i.Vdb.Spec.Image {
 			return true, nil
 		}
 	}
@@ -206,13 +210,15 @@ func (i *UpgradeManager) updateImageInStatefulSets(ctx context.Context) (int, er
 func (i *UpgradeManager) updateImageInStatefulSet(ctx context.Context, sts *appsv1.StatefulSet) (bool, error) {
 	stsUpdated := false
 	// Skip the statefulset if it already has the proper image.
-	cnts := sts.Spec.Template.Spec.Containers
-	inx := names.GetServerContainerIndex(i.Vdb)
-	if len(cnts) > inx && cnts[inx].Image != i.Vdb.Spec.Image {
+	svrCnt := vk8s.GetServerContainer(sts.Spec.Template.Spec.Containers)
+	if svrCnt == nil {
+		return false, fmt.Errorf("could not find the server container in the sts %s", sts.Name)
+	}
+	if svrCnt.Image != i.Vdb.Spec.Image {
 		i.Log.Info("Updating image in old statefulset", "name", sts.ObjectMeta.Name)
-		sts.Spec.Template.Spec.Containers[inx].Image = i.Vdb.Spec.Image
-		if i.Vdb.IsNMASideCarDeploymentEnabled() {
-			sts.Spec.Template.Spec.Containers[names.GetNMAContainerIndex()].Image = i.Vdb.Spec.Image
+		svrCnt.Image = i.Vdb.Spec.Image
+		if nmaCnt := vk8s.GetNMAContainer(sts.Spec.Template.Spec.Containers); nmaCnt != nil {
+			nmaCnt.Image = i.Vdb.Spec.Image
 		}
 		// We change the update strategy to OnDelete. We don't want the k8s
 		// sts controller to interphere and do a rolling update after the
@@ -253,8 +259,11 @@ func (i *UpgradeManager) deletePodsRunningOldImage(ctx context.Context, scName s
 		}
 		// Skip the pod if it already has the proper image.
-		cnts := pod.Spec.Containers
-		if cnts[names.GetFirstContainerIndex()].Image != i.Vdb.Spec.Image {
+		cntImage, err := vk8s.GetServerImage(pod.Spec.Containers)
+		if err != nil {
+			return numPodsDeleted, err
+		}
+		if cntImage != i.Vdb.Spec.Image {
 			i.Log.Info("Deleting pod that had old image", "name", pod.ObjectMeta.Name)
 			err = i.VRec.Client.Delete(ctx, pod)
 			if err != nil {
@@ -275,7 +284,11 @@ func (i *UpgradeManager) deleteStsRunningOldImage(ctx context.Context) error {
 	for inx := range stss.Items {
 		sts := &stss.Items[inx]
-		if sts.Spec.Template.Spec.Containers[names.GetFirstContainerIndex()].Image != i.Vdb.Spec.Image {
+		cntImage, err := vk8s.GetServerImage(sts.Spec.Template.Spec.Containers)
+		if err != nil {
+			return err
+		}
+		if cntImage != i.Vdb.Spec.Image {
 			i.Log.Info("Deleting sts that had old image", "name", sts.ObjectMeta.Name)
 			err = i.VRec.Client.Delete(ctx, sts)
 			if err != nil {
diff --git a/pkg/controllers/vdb/upgrade_test.go b/pkg/controllers/vdb/upgrade_test.go
index 70823a528..da9f56b7a 100644
--- a/pkg/controllers/vdb/upgrade_test.go
+++ b/pkg/controllers/vdb/upgrade_test.go
@@ -24,6 +24,7 @@ import (
 	vmeta "github.com/vertica/vertica-kubernetes/pkg/meta"
 	"github.com/vertica/vertica-kubernetes/pkg/names"
 	"github.com/vertica/vertica-kubernetes/pkg/test"
+	"github.com/vertica/vertica-kubernetes/pkg/vk8s"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -112,11 +113,14 @@ var _ = Describe("upgrade", func() {
 		Expect(stsChange).Should(Equal(2))
 
 		sts := &appsv1.StatefulSet{}
-		inx := names.GetServerContainerIndex(vdb)
 		Expect(k8sClient.Get(ctx, names.GenStsName(vdb, &vdb.Spec.Subclusters[0]), sts)).Should(Succeed())
-		Expect(sts.Spec.Template.Spec.Containers[inx].Image).Should(Equal(NewImage))
+		svrCnt := vk8s.GetServerContainer(sts.Spec.Template.Spec.Containers)
+		Expect(svrCnt).ShouldNot(BeNil())
+		Expect(svrCnt.Image).Should(Equal(NewImage))
 		Expect(k8sClient.Get(ctx, names.GenStsName(vdb, &vdb.Spec.Subclusters[1]), sts)).Should(Succeed())
-		Expect(sts.Spec.Template.Spec.Containers[inx].Image).Should(Equal(NewImage))
+		svrCnt = vk8s.GetServerContainer(sts.Spec.Template.Spec.Containers)
+		Expect(svrCnt).ShouldNot(BeNil())
+		Expect(svrCnt.Image).Should(Equal(NewImage))
 	})
 
 	It("should delete pods of all subclusters", func() {
diff --git a/pkg/names/names.go b/pkg/names/names.go
index 8c6fd3b3e..ac571b224 100644
--- a/pkg/names/names.go
+++ b/pkg/names/names.go
@@ -20,7 +20,6 @@ import (
 
 	vapi "github.com/vertica/vertica-kubernetes/api/v1"
 	appsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 )
 
@@ -29,11 +28,6 @@ const (
 	NMAContainer = "nma"
 )
 
-const (
-	firstContainerIndex = iota
-	serverContainerIndex
-)
-
 // GenNamespacedName will take any name and make it a namespace name that uses
 // the same namespace as the VerticaDB.
 func GenNamespacedName(vdb *vapi.VerticaDB, name string) types.NamespacedName {
@@ -105,34 +99,3 @@ func GenPVName(vdb *vapi.VerticaDB, sc *vapi.Subcluster, podIndex int32) types.N
 		Name: fmt.Sprintf("pv-%s-%s-%s-%d", vapi.LocalDataPVC, vdb.Name, sc.GenCompatibleFQDN(), podIndex),
 	}
 }
-
-// GetServerContainerIndex returns the correct server container
-// index according to the NMA running mode. If there is no nma
-// container, the server will be the first, otherwise the second
-func GetServerContainerIndex(vdb *vapi.VerticaDB) int {
-	if vdb.IsNMASideCarDeploymentEnabled() {
-		return serverContainerIndex
-	}
-	return firstContainerIndex
-}
-
-// GetServerContainerIndexInContainers returns the server index in a given
-// containers slice coming from a Vertica pod
-func GetServerContainerIndexInSlice(cnts []corev1.Container) int {
-	// the server can either be the first or the 2nd container(if nma sidecar is enabled)
-	if cnts[firstContainerIndex].Name == ServerContainer {
-		return firstContainerIndex
-	}
-	return serverContainerIndex
-}
-
-// GetNMAContainerIndex returns the index of
-// the NMA container
-func GetNMAContainerIndex() int {
-	// nma when in a sidecar is always the first container
-	return firstContainerIndex
-}
-
-func GetFirstContainerIndex() int {
-	return firstContainerIndex
-}
diff --git a/pkg/vk8s/container.go b/pkg/vk8s/container.go
new file mode 100644
index 000000000..5e2afd80e
--- /dev/null
+++ b/pkg/vk8s/container.go
@@ -0,0 +1,54 @@
+/*
+ (c) Copyright [2021-2023] Open Text.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ You may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package vk8s
+
+import (
+	"errors"
+
+	"github.com/vertica/vertica-kubernetes/pkg/names"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// GetServerContainer returns a pointer to the container that runs the Vertica
+// server
+func GetServerContainer(cnts []corev1.Container) *corev1.Container {
+	return getNamedContainer(cnts, names.ServerContainer)
+}
+
+// GetNMAContainer returns a pointer to the container that runs the node
+// management agent.
+func GetNMAContainer(cnts []corev1.Container) *corev1.Container {
+	return getNamedContainer(cnts, names.NMAContainer)
+}
+
+func getNamedContainer(cnts []corev1.Container, cntName string) *corev1.Container {
+	for i := range cnts {
+		if cnts[i].Name == cntName {
+			return &cnts[i]
+		}
+	}
+	return nil
+}
+
+// GetServerImage returns the name of the image used for the server container.
+// It returns an error if it cannot find the server container.
+func GetServerImage(cnts []corev1.Container) (string, error) {
+	cnt := GetServerContainer(cnts)
+	if cnt == nil {
+		return "", errors.New("could not find the server container")
+	}
+	return cnt.Image, nil
+}
diff --git a/pkg/vk8s/container_test.go b/pkg/vk8s/container_test.go
new file mode 100644
index 000000000..b2231e5e7
--- /dev/null
+++ b/pkg/vk8s/container_test.go
@@ -0,0 +1,47 @@
+/*
+ (c) Copyright [2021-2023] Open Text.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ You may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package vk8s
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	vapi "github.com/vertica/vertica-kubernetes/api/v1"
+	vmeta "github.com/vertica/vertica-kubernetes/pkg/meta"
+	"github.com/vertica/vertica-kubernetes/pkg/names"
+	"github.com/vertica/vertica-kubernetes/pkg/test"
+	corev1 "k8s.io/api/core/v1"
+)
+
+var _ = Describe("vk8s/container_test", func() {
+	ctx := context.Background()
+
+	It("should find the server container in the spec", func() {
+		vdb := vapi.MakeVDBForHTTP("v-nma-tls-abcde")
+		vdb.Spec.Image = vapi.NMAInSideCarDeploymentMinVersion
+		vdb.Annotations[vmeta.VClusterOpsAnnotation] = vmeta.VClusterOpsAnnotationTrue
+		vdb.Annotations[vmeta.VersionAnnotation] = vapi.NMAInSideCarDeploymentMinVersion
+		test.CreatePods(ctx, k8sClient, vdb, test.AllPodsRunning)
+		defer test.DeletePods(ctx, k8sClient, vdb)
+
+		pod := corev1.Pod{}
+		Ω(k8sClient.Get(ctx, names.GenPodName(vdb, &vdb.Spec.Subclusters[0], 0), &pod)).Should(Succeed())
+		Ω(GetServerContainer(pod.Spec.Containers)).ShouldNot(BeNil())
+		Ω(GetServerImage(pod.Spec.Containers)).Should(Equal(vapi.NMAInSideCarDeploymentMinVersion))
+		Ω(GetNMAContainer(pod.Spec.Containers)).ShouldNot(BeNil())
+	})
+})
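
For illustration only, and not part of the patch: a minimal sketch of how the new vk8s helpers behave when an injected sidecar such as the Istio proxy lands first in a pod's container list. The container names "server" and "nma" mirror the names.ServerContainer and names.NMAContainer constants; the image tags are placeholders.

// Hypothetical usage sketch of the vk8s helpers introduced above.
package main

import (
	"fmt"

	"github.com/vertica/vertica-kubernetes/pkg/vk8s"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// A pod spec where an injected Istio proxy happens to be the first container.
	cnts := []corev1.Container{
		{Name: "istio-proxy", Image: "docker.io/istio/proxyv2:1.20.0"},
		{Name: "server", Image: "docker.io/vertica/vertica-k8s:latest"},
		{Name: "nma", Image: "docker.io/vertica/vertica-k8s:latest"},
	}

	// Index-based lookups would have picked istio-proxy here; the name-based
	// helpers find the server container regardless of ordering.
	img, err := vk8s.GetServerImage(cnts)
	if err != nil {
		fmt.Println("no server container:", err)
		return
	}
	fmt.Println("server image:", img)

	// The NMA sidecar is optional, so callers nil-check it.
	if nma := vk8s.GetNMAContainer(cnts); nma != nil {
		fmt.Println("nma image:", nma.Image)
	}
}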