
pipeline ipv6 datapath #2069

Merged: 40 commits, Jul 26, 2023
Changes from 10 commits
Commits (40)
cb7d932
add Linux test cases for connectivity
paulyufan2 Jun 22, 2023
60e384d
pipeline ipv6 test cases
paulyufan2 Jul 21, 2023
4344d7e
add linux ipv6 yamls
paulyufan2 Jul 21, 2023
dd85072
add deployment yamls
paulyufan2 Jul 21, 2023
341ac63
remove duplicated linux deloyment files
paulyufan2 Jul 21, 2023
297b3f4
add linux datapath
paulyufan2 Jul 21, 2023
b15314a
add windows test
paulyufan2 Jul 21, 2023
e9faea0
change datapath windows file name
paulyufan2 Jul 21, 2023
7e634a0
fix datapath windows test
paulyufan2 Jul 21, 2023
d564745
fix datapath windows test
paulyufan2 Jul 21, 2023
004e2d7
scripts to cleanup ovs bridge and ovs leaked rules (#2066)
paulyufan2 Jul 24, 2023
5a40bff
fix comments
paulyufan2 Jul 24, 2023
60e90e6
fix a minor issue
paulyufan2 Jul 24, 2023
e5b1718
remove conflicts
paulyufan2 Jul 24, 2023
b154ab6
fix comment
paulyufan2 Jul 24, 2023
ed2002d
Merge branch 'master' into ipv6datapathpipeline
paulyufan2 Jul 24, 2023
2b3a7fe
Merge branch 'master' into ipv6datapathpipeline
paulyufan2 Jul 25, 2023
a13b9de
fix comments
paulyufan2 Jul 25, 2023
8622773
rerun test
paulyufan2 Jul 25, 2023
1e1ae11
rerun test
paulyufan2 Jul 25, 2023
7478d7f
fix comments
paulyufan2 Jul 25, 2023
a8642c8
change namespace back to default
paulyufan2 Jul 25, 2023
0273ccf
Merge branch 'master' into ipv6datapathpipeline
paulyufan2 Jul 25, 2023
dc40c96
add namespace fixes
paulyufan2 Jul 25, 2023
bac5b33
add pipeline
paulyufan2 Jul 25, 2023
45ea65d
add pipeline
paulyufan2 Jul 25, 2023
2c70b7f
add logs
paulyufan2 Jul 26, 2023
69b112f
fix dualstack pipeline setup
paulyufan2 Jul 26, 2023
01e025f
add AzureOverlayDualStackPreview
paulyufan2 Jul 26, 2023
4d4e0fc
delete pipeline templates
paulyufan2 Jul 26, 2023
988c530
put installdualstackoverlayp
paulyufan2 Jul 26, 2023
3b3904e
fix comments
paulyufan2 Jul 26, 2023
d65b071
fix comments
paulyufan2 Jul 26, 2023
a4c3e41
fix comments
paulyufan2 Jul 26, 2023
7c15bef
Merge branch 'master' into ipv6datapathpipeline
paulyufan2 Jul 26, 2023
50477d4
remove readme for dualstack
paulyufan2 Jul 26, 2023
d828c04
comment fix
paulyufan2 Jul 26, 2023
533e4ef
fix comments
paulyufan2 Jul 26, 2023
0c9d86f
fix logs
paulyufan2 Jul 26, 2023
345bac2
fix error
paulyufan2 Jul 26, 2023
325 changes: 325 additions & 0 deletions test/integration/datapath/dapapath_linux_test.go
@@ -0,0 +1,325 @@
//go:build connection

package connection

import (
"context"
"flag"
"fmt"
"net"
"os"
"testing"
"time"

"github.com/Azure/azure-container-networking/test/integration"
"github.com/Azure/azure-container-networking/test/integration/goldpinger"
k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils"
"github.com/Azure/azure-container-networking/test/internal/retry"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"

appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
LinuxDeployIPV4 = "../manifests/datapath/linux-deployment.yaml"
LinuxDeployIPv6 = "../manifests/datapath/linux-deployment-ipv6.yaml"
podLabelKey = "app"
podCount = 2
nodepoolKey = "agentpool"
maxRetryDelaySeconds = 10
defaultTimeoutSeconds = 120
defaultRetryDelaySeconds = 1
goldpingerRetryCount = 24
goldpingerDelayTimeSeconds = 5
gpFolder = "../manifests/goldpinger"
gpClusterRolePath = gpFolder + "/cluster-role.yaml"
gpClusterRoleBindingPath = gpFolder + "/cluster-role-binding.yaml"
gpServiceAccountPath = gpFolder + "/service-account.yaml"
gpDaemonset = gpFolder + "/daemonset.yaml"
gpDaemonsetIPv6 = gpFolder + "/daemonset-ipv6.yaml"
gpDeployment = gpFolder + "/deployment.yaml"
)

var (
podPrefix = flag.String("podName", "goldpinger", "Prefix for test pods")
podNamespace = flag.String("namespace", "default", "Namespace for test pods")
nodepoolSelector = flag.String("nodepoolSelector", "nodepool1", "Provides nodepool as a Node-Selector for pods")
testProfile = flag.String("testName", LinuxDeployIPV4, "Linux datapath test profile")
defaultRetrier = retry.Retrier{
Attempts: 10,
Delay: defaultRetryDelaySeconds * time.Second,
}
)

/*
This test assumes that you have the current credentials loaded in your default kubeconfig for a
k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes.
*** The expected nodepool name is nodepool1 (the -nodepoolSelector default); if the nodepool has a different name, change it with:
-nodepoolSelector="yournodepoolname"

To run the test use one of the following commands:
go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags=connection,integration -run ^TestDatapathLinux$
or
go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags=connection,integration -run ^TestDatapathLinux$ -podName=acnpod -nodepoolSelector=aks-pool1


This test checks pod to pod, pod to node, and pod to internet connectivity.

The timeout context is controlled by the -timeout flag.

*/

// createLabelSelectors returns the pod label selector and the node label selector built from the test flags.
func createLabelSelectors() (string, string) {
return fmt.Sprintf("%s=%s", podLabelKey, *podPrefix), fmt.Sprintf("%s=%s", nodepoolKey, *nodepoolSelector)
}

func setupLinuxEnvironment(t *testing.T) {
ctx := context.Background()

t.Log("Create Clientset")
clientset, err := k8sutils.MustGetClientset()
if err != nil {
require.NoError(t, err, "could not get k8s clientset: %v", err)
}

t.Log("Create Label Selectors")
podLabelSelector, nodeLabelSelector := createLabelSelectors()

t.Log("Get Nodes")
nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector)
if err != nil {
require.NoError(t, err, "could not get k8s node list: %v", err)
}

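// In the Windows test the create flag is derived from the namespace-creation error; here err comes from
// the node lookup above and has already been checked, so createPodFlag is effectively always true.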
createPodFlag := !(apierrors.IsAlreadyExists(err))
t.Logf("%v", createPodFlag)

if createPodFlag {
var daemonset appsv1.DaemonSet
t.Log("Creating Linux pods through deployment")
deployment, err := k8sutils.MustParseDeployment(*testProfile)
if err != nil {
require.NoError(t, err)
}

if *testProfile == LinuxDeployIPV4 {
daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonset)
if err != nil {
t.Fatal(err)
}
} else {
daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonsetIPv6)
if err != nil {
t.Fatal(err)
}
}

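// Set up the goldpinger cluster role, cluster role binding, and service account; the returned
// cleanup function removes them again when the test finishes.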
rbacCleanUpFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath)
if err != nil {
t.Log(os.Getwd())
t.Fatal(err)
}

// Fields for overwriting the existing deployment yaml.
// Defaults from flags will not change anything.
deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix
deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix
deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector
deployment.Name = *podPrefix
deployment.Namespace = *podNamespace

deploymentsClient := clientset.AppsV1().Deployments(*podNamespace)
err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment)
if err != nil {
require.NoError(t, err)
}

daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace)
err = k8sutils.MustCreateDaemonset(ctx, daemonsetClient, daemonset)
if err != nil {
t.Fatal(err)
}

t.Cleanup(func() {
t.Log("cleaning up resources")
rbacCleanUpFn()

if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil {
t.Log(err)
}

if err := daemonsetClient.Delete(ctx, daemonset.Name, metav1.DeleteOptions{}); err != nil {
t.Log(err)
}
})

t.Log("Waiting for pods to be running state")
err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
if err != nil {
require.NoError(t, err)
}
t.Log("Successfully created customer linux pods")
} else {
t.Log("Checking for pods to be running state")
err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
if err != nil {
require.NoError(t, err)
}
}

t.Log("Checking Linux test environment")
for _, node := range nodes.Items {
pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name)
if err != nil {
require.NoError(t, err, "could not get k8s clientset: %v", err)
}
if len(pods.Items) <= 1 {
t.Logf("node %s has fewer than 2 test pods", node.Name)
require.NoError(t, errors.New("fewer than 2 pods on node"))
}

}
t.Log("Linux test environment ready")
}

func TestDatapathLinux(t *testing.T) {
ctx := context.Background()

t.Log("Get REST config")
restConfig := k8sutils.MustGetRestConfig(t)

t.Log("Create Clientset")
clientset, err := k8sutils.MustGetClientset()
require.NoError(t, err, "could not get k8s clientset: %v", err)

setupLinuxEnvironment(t)
podLabelSelector, _ := createLabelSelectors()

t.Run("Linux ping tests", func(t *testing.T) {
// Check goldpinger health
t.Run("all pods have IPs assigned", func(t *testing.T) {
podsClient := clientset.CoreV1().Pods(*podNamespace)

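// Poll until every goldpinger pod is scheduled, past Pending, and has a pod IP assigned.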
checkPodIPsFn := func() error {
podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: "app=goldpinger"})
if err != nil {
return err
}

if len(podList.Items) == 0 {
return errors.New("no pods scheduled")
}

for _, pod := range podList.Items {
if pod.Status.Phase == apiv1.PodPending {
return errors.New("some pods still pending")
}
}

for _, pod := range podList.Items {
if pod.Status.PodIP == "" {
return errors.New("a pod has not been allocated an IP")
}
}

return nil
}
err := defaultRetrier.Do(ctx, checkPodIPsFn)
if err != nil {
t.Fatalf("not all pods were allocated IPs: %v", err)
}
t.Log("all pods have been allocated IPs")
})

// TODO: avoid using yaml file path to control test case
if *testProfile == LinuxDeployIPv6 {
t.Run("Linux dualstack overlay tests", func(t *testing.T) {
t.Run("test dualstack overlay", func(t *testing.T) {
podsClient := clientset.CoreV1().Pods(*podNamespace)

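// In a dualstack cluster each pod should report two pod IPs: one IPv4 and one IPv6.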
checkPodIPsFn := func() error {
podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: "app=goldpinger"})
if err != nil {
return err
}

for _, pod := range podList.Items {
podIPs := pod.Status.PodIPs
if len(podIPs) < 2 {
return errors.New("a pod has fewer than two IPs")
}
// Expect an IPv4 address first and an IPv6 address second; To4() is non-nil only for IPv4.
ipv4, ipv6 := net.ParseIP(podIPs[0].IP), net.ParseIP(podIPs[1].IP)
if ipv4 == nil || ipv4.To4() == nil || ipv6 == nil || ipv6.To4() != nil {
return errors.New("a pod does not have both an ipv4 and an ipv6 address")
}
}
return nil
}
err := defaultRetrier.Do(ctx, checkPodIPsFn)
if err != nil {
t.Fatalf("dualstack overlay pod properties check is failed due to: %v", err)
}

t.Log("all dualstack linux pods properties have been verified")
})
})
}

t.Run("all linux pods can ping each other", func(t *testing.T) {
clusterCheckCtx, cancel := context.WithTimeout(ctx, 3*time.Minute)
defer cancel()

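// Port-forward a local port to a goldpinger pod so the test can query its check API.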
pfOpts := k8s.PortForwardingOpts{
Namespace: *podNamespace,
LabelSelector: podLabelSelector,
LocalPort: 9090,
DestPort: 8080,
}

pf, err := k8s.NewPortForwarder(restConfig, t, pfOpts)
if err != nil {
t.Fatal(err)
}

portForwardCtx, cancel := context.WithTimeout(ctx, defaultTimeoutSeconds*time.Second)
defer cancel()

portForwardFn := func() error {
err := pf.Forward(portForwardCtx)
if err != nil {
t.Logf("unable to start port forward: %v", err)
return err
}
return nil
}
if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil {
t.Fatalf("could not start port forward within %ds: %v", defaultTimeoutSeconds, err)
}
defer pf.Stop()

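// Query goldpinger for the full cluster ping mesh and retry until every ping reports healthy.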
gpClient := goldpinger.Client{Host: pf.Address()}
clusterCheckFn := func() error {
clusterState, err := gpClient.CheckAll(clusterCheckCtx)
if err != nil {
return err
}
stats := goldpinger.ClusterStats(clusterState)
stats.PrintStats()
if stats.AllPingsHealthy() {
return nil
}

return errors.New("not all pings are healthy")
}
retrier := retry.Retrier{Attempts: goldpingerRetryCount, Delay: goldpingerDelayTimeSeconds * time.Second}
if err := retrier.Do(clusterCheckCtx, clusterCheckFn); err != nil {
t.Fatalf("goldpinger pods network health could not reach healthy state after %d seconds: %v", goldpingerRetryCount*goldpingerDelayTimeSeconds, err)
}

t.Log("all pings successful!")
})
})
}
@@ -6,6 +6,7 @@ import (
"context"
"flag"
"fmt"
"net"
"testing"

"github.com/Azure/azure-container-networking/test/internal/datapath"
@@ -36,9 +37,9 @@ k8s cluster with a windows nodepool consisting of at least 2 windows nodes.
-nodepoolSelector="yournodepoolname"

To run the test use one of the following commands:
-go test -count=1 test/integration/datapath/datapath_win_test.go -timeout 3m -tags connection -run ^TestDatapathWin$ -tags=connection
+go test -count=1 test/integration/datapath/datapath_windows_test.go -timeout 3m -tags connection -run ^TestDatapathWin$ -tags=connection
or
-go test -count=1 test/integration/datapath/datapath_win_test.go -timeout 3m -tags connection -run ^TestDatapathWin$ -podName=acnpod -nodepoolSelector=npwina -tags=connection
+go test -count=1 test/integration/datapath/datapath_windows_test.go -timeout 3m -tags connection -run ^TestDatapathWin$ -podName=acnpod -nodepoolSelector=npwina -tags=connection


This test checks pod to pod, pod to node, and pod to internet for datapath connectivity.
@@ -71,6 +72,9 @@ func TestDatapathWin(t *testing.T) {
// Test Namespace
t.Log("Create Namespace")
err = k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace)
if err != nil {
require.NoError(t, err, "failed to create pod namespace %s due to: %v", *podNamespace, err)
}
createPodFlag := !(apierrors.IsAlreadyExists(err))

if createPodFlag {
@@ -110,7 +114,7 @@ func TestDatapathWin(t *testing.T) {
require.NoError(t, err)
}
}
t.Log("Checking Windows test environment ")
t.Log("Checking Windows test environment")
for _, node := range nodes.Items {

pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name)
@@ -129,18 +133,27 @@ func TestDatapathWin(t *testing.T) {
for _, node := range nodes.Items {
t.Log("Windows ping tests (1)")
nodeIP := ""
nodeIPv6 := ""
for _, address := range node.Status.Addresses {
if address.Type == "InternalIP" {
// A node can report both an IPv4 and an IPv6 InternalIP; record each separately.
// To4() is non-nil only for IPv4 addresses.
ip := net.ParseIP(address.Address)
if ip == nil {
continue
}
if ip.To4() != nil {
nodeIP = address.Address
} else {
nodeIPv6 = address.Address
}
}
}

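// Verify pod to node connectivity over IPv4 first, then over IPv6 when an IPv6 node address exists.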
err := datapath.WindowsPodToNode(ctx, clientset, node.Name, nodeIP, *podNamespace, podLabelSelector, restConfig)
require.NoError(t, err, "Windows pod to node, ping test failed with: %+v", err)
t.Logf("Windows pod to node, passed for node: %s", node.Name)

// windows ipv6 connectivity
if nodeIPv6 != "" {
err = datapath.WindowsPodToNode(ctx, clientset, node.Name, nodeIPv6, *podNamespace, podLabelSelector, restConfig)
require.NoError(t, err, "Windows pod to node, ipv6 ping test failed with: %+v", err)
t.Logf("Windows pod to node via ipv6, passed for node: %s", node.Name)
}
}
})

1 change: 1 addition & 0 deletions test/integration/goldpinger/client.go
@@ -1,3 +1,4 @@
//go:build integration
// +build integration

package goldpinger