pipeline ipv6 datapath #2069

Merged Jul 26, 2023

Commits (40)
cb7d932
add Linux test cases for connectivity
paulyufan2 Jun 22, 2023
60e384d
pipeline ipv6 test cases
paulyufan2 Jul 21, 2023
4344d7e
add linux ipv6 yamls
paulyufan2 Jul 21, 2023
dd85072
add deployment yamls
paulyufan2 Jul 21, 2023
341ac63
remove duplicated linux deloyment files
paulyufan2 Jul 21, 2023
297b3f4
add linux datapath
paulyufan2 Jul 21, 2023
b15314a
add windows test
paulyufan2 Jul 21, 2023
e9faea0
change datapath windows file name
paulyufan2 Jul 21, 2023
7e634a0
fix datapath windows test
paulyufan2 Jul 21, 2023
d564745
fix datapath windows test
paulyufan2 Jul 21, 2023
004e2d7
scripts to cleanup ovs bridge and ovs leaked rules (#2066)
paulyufan2 Jul 24, 2023
5a40bff
fix comments
paulyufan2 Jul 24, 2023
60e90e6
fix a minor issue
paulyufan2 Jul 24, 2023
e5b1718
remove conflicts
paulyufan2 Jul 24, 2023
b154ab6
fix comment
paulyufan2 Jul 24, 2023
ed2002d
Merge branch 'master' into ipv6datapathpipeline
paulyufan2 Jul 24, 2023
2b3a7fe
Merge branch 'master' into ipv6datapathpipeline
paulyufan2 Jul 25, 2023
a13b9de
fix comments
paulyufan2 Jul 25, 2023
8622773
rerun test
paulyufan2 Jul 25, 2023
1e1ae11
rerun test
paulyufan2 Jul 25, 2023
7478d7f
fix comments
paulyufan2 Jul 25, 2023
a8642c8
change namespace back to default
paulyufan2 Jul 25, 2023
0273ccf
Merge branch 'master' into ipv6datapathpipeline
paulyufan2 Jul 25, 2023
dc40c96
add namespace fixes
paulyufan2 Jul 25, 2023
bac5b33
add pipeline
paulyufan2 Jul 25, 2023
45ea65d
add pipeline
paulyufan2 Jul 25, 2023
2c70b7f
add logs
paulyufan2 Jul 26, 2023
69b112f
fix dualstack pipeline setup
paulyufan2 Jul 26, 2023
01e025f
add AzureOverlayDualStackPreview
paulyufan2 Jul 26, 2023
4d4e0fc
delete pipeline templates
paulyufan2 Jul 26, 2023
988c530
put installdualstackoverlayp
paulyufan2 Jul 26, 2023
3b3904e
fix comments
paulyufan2 Jul 26, 2023
d65b071
fix comments
paulyufan2 Jul 26, 2023
a4c3e41
fix comments
paulyufan2 Jul 26, 2023
7c15bef
Merge branch 'master' into ipv6datapathpipeline
paulyufan2 Jul 26, 2023
50477d4
remove readme for dualstack
paulyufan2 Jul 26, 2023
d828c04
comment fix
paulyufan2 Jul 26, 2023
533e4ef
fix comments
paulyufan2 Jul 26, 2023
0c9d86f
fix logs
paulyufan2 Jul 26, 2023
345bac2
fix error
paulyufan2 Jul 26, 2023
20 changes: 10 additions & 10 deletions hack/aks/README.md
@@ -21,14 +21,14 @@ SWIFT Infra
net-up Create required swift vnet/subnets

AKS Clusters
-byocni-up Alias to swift-byocni-up
-cilium-up Alias to swift-cilium-up
-up Alias to swift-up
-overlay-up Brings up an Overlay AzCNI cluster
-swift-byocni-up Bring up a SWIFT BYO CNI cluster
-swift-cilium-up Bring up a SWIFT Cilium cluster
-swift-up Bring up a SWIFT AzCNI cluster
-windows-cniv1-up Bring up a Windows AzCNIv1 cluster
-down Delete the cluster
-vmss-restart Restart the nodes of the cluster
+byocni-up Alias to swift-byocni-up
+cilium-up Alias to swift-cilium-up
+up Alias to swift-up
+overlay-up Brings up an Overlay AzCNI cluster
+swift-byocni-up Bring up a SWIFT BYO CNI cluster
+swift-cilium-up Bring up a SWIFT Cilium cluster
+swift-up Bring up a SWIFT AzCNI cluster
+windows-cniv1-up Bring up a Windows AzCNIv1 cluster
+down Delete the cluster
+vmss-restart Restart the nodes of the cluster
```
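These targets are provided by hack/aks/Makefile, which is not part of this diff. A minimal usage sketch, assuming the subscription, resource group, and cluster-name variables the Makefile expects are already set:

```bash
# Sketch only; the exact variable names and defaults live in hack/aks/Makefile,
# which is not shown in this diff.
make -C hack/aks overlay-up     # bring up an Overlay AzCNI cluster
make -C hack/aks vmss-restart   # restart the cluster nodes if needed
make -C hack/aks down           # delete the cluster when finished
```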
298 changes: 298 additions & 0 deletions test/integration/datapath/datapath_linux_test.go
@@ -0,0 +1,298 @@
//go:build connection

package connection

import (
"context"
"flag"
"net"
"os"
"testing"
"time"

"github.com/Azure/azure-container-networking/test/integration"
"github.com/Azure/azure-container-networking/test/integration/goldpinger"
k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils"
"github.com/Azure/azure-container-networking/test/internal/retry"
"github.com/pkg/errors"

appsv1 "k8s.io/api/apps/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
podLabelKey = "app"
podCount = 2
nodepoolKey = "agentpool"
LinuxDeployIPV4 = "../manifests/datapath/linux-deployment.yaml"
LinuxDeployIPv6 = "../manifests/datapath/linux-deployment-ipv6.yaml"
maxRetryDelaySeconds = 10
defaultTimeoutSeconds = 120
defaultRetryDelaySeconds = 1
goldpingerRetryCount = 24
goldpingerDelayTimeSeconds = 5
gpFolder = "../manifests/goldpinger"
gpClusterRolePath = gpFolder + "/cluster-role.yaml"
gpClusterRoleBindingPath = gpFolder + "/cluster-role-binding.yaml"
gpServiceAccountPath = gpFolder + "/service-account.yaml"
gpDaemonset = gpFolder + "/daemonset.yaml"
gpDaemonsetIPv6 = gpFolder + "/daemonset-ipv6.yaml"
gpDeployment = gpFolder + "/deployment.yaml"
)

var (
podPrefix = flag.String("podName", "goldpinger", "Prefix for test pods")
podNamespace = flag.String("namespace", "default", "Namespace for test pods")
nodepoolSelector = flag.String("nodepoolSelector", "nodepool1", "Provides nodepool as a Linux Node-Selector for pods")
// TODO: add flag to support dual nic scenario
isDualStack = flag.Bool("isDualStack", false, "whether system supports dualstack scenario")
defaultRetrier = retry.Retrier{
Attempts: 10,
Delay: defaultRetryDelaySeconds * time.Second,
}
)

/*
This test assumes that you have the current credentials loaded in your default kubeconfig for a
k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes.
*** The expected nodepool name is nodepool1. If the nodepool has a different name, make sure to override nodepoolSelector with:
-nodepoolSelector="yournodepoolname"

To run the test use one of the following commands:
go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags=connection,integration -run ^TestDatapathLinux$
or
go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags=connection,integration -run ^TestDatapathLinux$ -podName=acnpod -nodepoolSelector=aks-pool1


This test checks pod-to-pod, pod-to-node, and pod-to-Internet connectivity.

The timeout context is controlled by the -timeout flag.

*/

func setupLinuxEnvironment(t *testing.T) {
ctx := context.Background()

t.Log("Create Clientset")
clientset, err := k8sutils.MustGetClientset()
if err != nil {
t.Fatalf("could not get k8s clientset: %v", err)
}

t.Log("Create Label Selectors")
podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix)
nodeLabelSelector := k8sutils.CreateLabelSelector(nodepoolKey, nodepoolSelector)

t.Log("Get Nodes")
nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector)
if err != nil {
t.Fatalf("could not get k8s node list: %v", err)
}

t.Log("Creating Linux pods through deployment")

// run goldpinger ipv4 and ipv6 test cases separately
var daemonset appsv1.DaemonSet
var deployment appsv1.Deployment

if *isDualStack {
deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPv6)
if err != nil {
t.Fatal(err)
}

daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonsetIPv6)
if err != nil {
t.Fatal(err)
}
} else {
deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPV4)
if err != nil {
t.Fatal(err)
}

daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonset)
if err != nil {
t.Fatal(err)
}
}

// set up common RBAC: ClusterRole, ClusterRoleBinding, ServiceAccount
rbacSetupFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath)
if err != nil {
t.Log(os.Getwd())
t.Fatal(err)
}

// Fields for overwriting existing deployment yaml.
// Defaults from flags will not change anything
deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix
deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix
deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector
deployment.Name = *podPrefix
deployment.Namespace = *podNamespace
daemonset.Namespace = *podNamespace

deploymentsClient := clientset.AppsV1().Deployments(*podNamespace)
err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment)
if err != nil {
t.Fatal(err)
}

daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace)
err = k8sutils.MustCreateDaemonset(ctx, daemonsetClient, daemonset)
if err != nil {
t.Fatal(err)
}

t.Cleanup(func() {
t.Log("cleaning up resources")
rbacSetupFn()

if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil {
t.Log(err)
}

if err := daemonsetClient.Delete(ctx, daemonset.Name, metav1.DeleteOptions{}); err != nil {
t.Log(err)
}
})

t.Log("Waiting for pods to be running state")
err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
if err != nil {
t.Fatalf("Pods are not in running state due to %+v", err)
}

if *isDualStack {
t.Log("Successfully created customer dualstack Linux pods")
} else {
t.Log("Successfully created customer singlestack Linux pods")
}

t.Log("Checking Linux test environment")
for _, node := range nodes.Items {
pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name)
if err != nil {
t.Fatalf("could not get k8s clientset: %v", err)
}
if len(pods.Items) <= 1 {
t.Logf("%s", node.Name)
t.Fatal("Less than 2 pods on node")
}
}

t.Log("Linux test environment ready")
}

func TestDatapathLinux(t *testing.T) {
ctx := context.Background()

t.Log("Get REST config")
restConfig := k8sutils.MustGetRestConfig(t)

t.Log("Create Clientset")
clientset, _ := k8sutils.MustGetClientset()

setupLinuxEnvironment(t)
podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix)

t.Run("Linux ping tests", func(t *testing.T) {
// Check goldpinger health
t.Run("all pods have IPs assigned", func(t *testing.T) {
err := k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
if err != nil {
t.Fatalf("Pods are not in running state due to %+v", err)
}
t.Log("all pods have been allocated IPs")
})

if *isDualStack {
t.Run("Linux dualstack overlay tests", func(t *testing.T) {
t.Run("test dualstack overlay", func(t *testing.T) {
podsClient := clientset.CoreV1().Pods(*podNamespace)

checkPodIPsFn := func() error {
podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: "app=goldpinger"})
if err != nil {
return err
}

for _, pod := range podList.Items {
podIPs := pod.Status.PodIPs
if len(podIPs) < 2 {
return errors.New("a pod only gets one IP")
}
if net.ParseIP(podIPs[0].IP).To4() == nil || net.ParseIP(podIPs[1].IP).To16() == nil {
return errors.New("a pod does not have both ipv4 and ipv6 address")
}
}
return nil
}
err := defaultRetrier.Do(ctx, checkPodIPsFn)
if err != nil {
t.Fatalf("dualstack overlay pod properties check is failed due to: %v", err)
}

t.Log("all dualstack linux pods properties have been verified")
})
})
}

t.Run("all linux pods can ping each other", func(t *testing.T) {
clusterCheckCtx, cancel := context.WithTimeout(ctx, 3*time.Minute)
defer cancel()

pfOpts := k8s.PortForwardingOpts{
Namespace: *podNamespace,
LabelSelector: podLabelSelector,
LocalPort: 9090,
DestPort: 8080,
}

pf, err := k8s.NewPortForwarder(restConfig, t, pfOpts)
if err != nil {
t.Fatal(err)
}

portForwardCtx, cancel := context.WithTimeout(ctx, defaultTimeoutSeconds*time.Second)
defer cancel()

portForwardFn := func() error {
err := pf.Forward(portForwardCtx)
if err != nil {
t.Logf("unable to start port forward: %v", err)
return err
}
return nil
}

if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil {
t.Fatalf("could not start port forward within %d: %v", defaultTimeoutSeconds, err)
}
defer pf.Stop()

gpClient := goldpinger.Client{Host: pf.Address()}
clusterCheckFn := func() error {
clusterState, err := gpClient.CheckAll(clusterCheckCtx)
if err != nil {
return err
}
stats := goldpinger.ClusterStats(clusterState)
stats.PrintStats()
if stats.AllPingsHealthy() {
return nil
}

return errors.New("not all pings are healthy")
}
retrier := retry.Retrier{Attempts: goldpingerRetryCount, Delay: goldpingerDelayTimeSeconds * time.Second}
if err := retrier.Do(clusterCheckCtx, clusterCheckFn); err != nil {
t.Fatalf("goldpinger pods network health could not reach healthy state after %d seconds: %v", goldpingerRetryCount*goldpingerDelayTimeSeconds, err)
}

t.Log("all pings successful!")
})
})
}
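For the dualstack overlay scenario added in this PR, the test is selected with the -isDualStack flag declared above. A hedged invocation sketch that mirrors the go test commands from the file's doc comment:

```bash
# Sketch only; follows the command format shown in the doc comment above,
# with -isDualStack=true added for a dualstack overlay cluster.
go test -count=1 test/integration/datapath/datapath_linux_test.go \
  -timeout 3m -tags=connection,integration \
  -run ^TestDatapathLinux$ -isDualStack=true
```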