diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 2813d90e..5d7fb104 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -77,6 +77,15 @@ jobs: BLIXT_DATAPLANE_IMAGE: "ghcr.io/kong/blixt-dataplane" BLIXT_UDP_SERVER_IMAGE: "ghcr.io/kong/blixt-udp-test-server" TAG: "integration-tests" + + - name: run integration tests with bpfd + run: make test.integration + env: + BLIXT_CONTROLPLANE_IMAGE: "ghcr.io/kong/blixt-controlplane" + BLIXT_DATAPLANE_IMAGE: "ghcr.io/kong/blixt-dataplane" + BLIXT_UDP_SERVER_IMAGE: "ghcr.io/kong/blixt-udp-test-server" + BLIXT_USE_BPFD: true + TAG: "integration-tests" ## Upload diagnostics if integration test step failed. - name: upload diagnostics diff --git a/Makefile b/Makefile index c1bbc3b9..56305380 100644 --- a/Makefile +++ b/Makefile @@ -188,6 +188,10 @@ ifndef ignore-not-found ignore-not-found = false endif +.PHONY: install-bpfd +install-bpfd: manifests kustomize ## Install blixt's bpfd deployment config into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/bpfd | kubectl apply -f - + .PHONY: install install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. 
$(KUSTOMIZE) build config/crd | kubectl apply -f - @@ -316,3 +320,10 @@ build.cluster: $(KTF) # builds a KIND cluster which can be used for testing and load.image: build.image kind load docker-image $(BLIXT_CONTROLPLANE_IMAGE):$(TAG) --name $(KIND_CLUSTER) && \ kubectl -n blixt-system rollout restart deployment blixt-controlplane + +.PHONY: load.all.images +load.all.images: build.all.images + kind load docker-image $(BLIXT_CONTROLPLANE_IMAGE):$(TAG) --name $(KIND_CLUSTER) && \ + kind load docker-image $(BLIXT_DATAPLANE_IMAGE):$(TAG) --name $(KIND_CLUSTER) && \ + kind load docker-image $(BLIXT_UDP_SERVER_IMAGE):$(TAG) --name $(KIND_CLUSTER) && \ + kubectl -n blixt-system rollout restart deployment blixt-controlplane diff --git a/config/bpf-bytecode/kustomization.yaml b/config/bpf-bytecode/kustomization.yaml new file mode 100644 index 00000000..d505e0e7 --- /dev/null +++ b/config/bpf-bytecode/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- tc-ingress.yaml +- tc-egress.yaml diff --git a/config/bpf-bytecode/tc-egress.yaml b/config/bpf-bytecode/tc-egress.yaml new file mode 100644 index 00000000..73b18b73 --- /dev/null +++ b/config/bpf-bytecode/tc-egress.yaml @@ -0,0 +1,21 @@ +apiVersion: bpfd.dev/v1alpha1 +kind: TcProgram +metadata: + labels: + app.kubernetes.io/name: blixt-tc-egress + name: tc-egress +spec: + bpffunctionname: tc_egress + # Select all nodes + nodeselector: {} + interfaceselector: + primarynodeinterface: true + priority: 0 + direction: egress + bytecode: + image: + url: quay.io/bpfd-bytecode/blixt-tc-egress:latest + imagepullpolicy: Always + mapownerselector: + matchLabels: + bpfd.dev/ownedByProgram: blixt-tc-ingress diff --git a/config/bpf-bytecode/tc-ingress.yaml b/config/bpf-bytecode/tc-ingress.yaml new file mode 100644 index 00000000..8ce35773 --- /dev/null +++ b/config/bpf-bytecode/tc-ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: bpfd.dev/v1alpha1 +kind: TcProgram +metadata: + labels: 
+ app.kubernetes.io/name: blixt-tc-ingress + name: tc-ingress +spec: + bpffunctionname: tc_ingress + # Select all nodes + nodeselector: {} + interfaceselector: + primarynodeinterface: true + priority: 0 + direction: ingress + bytecode: + image: + url: quay.io/bpfd-bytecode/blixt-tc-ingress:latest + imagepullpolicy: Always diff --git a/config/bpfd-install/kustomization.yaml b/config/bpfd-install/kustomization.yaml new file mode 100644 index 00000000..a6abf901 --- /dev/null +++ b/config/bpfd-install/kustomization.yaml @@ -0,0 +1,15 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +## bpfd CRDs +- https://github.com/bpfd-dev/bpfd/releases/download/v0.3.0/bpfd-crds-install-v0.3.0.yaml +## bpfd Operator +- https://github.com/bpfd-dev/bpfd/releases/download/v0.3.0/bpfd-operator-install-v0.3.0.yaml + +patches: +- path: patch.yaml + target: + kind: ConfigMap + name: config + version: v1 + name: bpfd-config diff --git a/config/bpfd-install/patch.yaml b/config/bpfd-install/patch.yaml new file mode 100644 index 00000000..10f002e9 --- /dev/null +++ b/config/bpfd-install/patch.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config +data: + ## Can be configured at runtime + bpfd.log.level: "debug" + bpfd.agent.log.level: "debug" + bpfd.enable.csi: "true" + ## Custom temporary blixt build + bpfd.image: quay.io/bpfd/bpfd:blixt diff --git a/config/bpfd/bpfd.yaml b/config/bpfd/bpfd.yaml new file mode 100644 index 00000000..1c9cb956 --- /dev/null +++ b/config/bpfd/bpfd.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: dataplane + namespace: system +spec: + template: + spec: + containers: + - name: dataplane + securityContext: + privileged: false + volumeMounts: + - name: bpf-maps + mountPath: /run/bpfd/fs/maps + readOnly: true + volumes: + - name: bpf-maps + csi: + driver: csi.bpfd.dev + volumeAttributes: + csi.bpfd.dev/program: blixt-tc-egress + csi.bpfd.dev/maps: 
AYA_LOGS,AYA_LOG_BUF,BACKENDS,BLIXT_CONNTRACK,GATEWAY_INDEXES diff --git a/config/bpfd/kustomization.yaml b/config/bpfd/kustomization.yaml new file mode 100644 index 00000000..b98d7f7f --- /dev/null +++ b/config/bpfd/kustomization.yaml @@ -0,0 +1,50 @@ +# Adds namespace to all resources. +namespace: blixt-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: blixt- + +# Labels to add to all resources and selectors. +#commonLabels: +# someName: someValue + +# - ../crd # TODO: no CRDs yet +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus + +# Protect the /metrics endpoint by putting it behind auth. +# If you want your controlplane to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. + +# Mount the controller config file for loading manager configurations +# through a ComponentConfig type +#- manager_config_patch.yaml + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 
+# 'CERTMANAGER' needs to be enabled to use ca injection +#- webhookcainjection_patch.yaml + +# the following config is for teaching kustomize how to do var substitution +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ../bpf-bytecode +- ../rbac +- ../manager +- ../dataplane +patches: +- path: bpfd.yaml diff --git a/config/tests/integration-bpfd/kustomization.yaml b/config/tests/integration-bpfd/kustomization.yaml new file mode 100644 index 00000000..8c4679c9 --- /dev/null +++ b/config/tests/integration-bpfd/kustomization.yaml @@ -0,0 +1,12 @@ + +images: +- name: ghcr.io/kong/blixt-dataplane + newTag: integration-tests +- name: ghcr.io/kong/blixt-controlplane + newTag: integration-tests +- name: ghcr.io/kong/blixt-udp-test-server + newTag: integration-tests +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ../../bpfd diff --git a/dataplane/.cargo/config.toml b/dataplane/.cargo/config.toml index f0ccbc9a..029b94e5 100644 --- a/dataplane/.cargo/config.toml +++ b/dataplane/.cargo/config.toml @@ -1,2 +1,6 @@ [alias] -xtask = "run --package xtask --" \ No newline at end of file +xtask = "run --package xtask --" + +## Needed to build blixt's programs with BTF + custom bpf-linker build. 
+[build] +rustflags = ["-C", "debuginfo=2"] \ No newline at end of file diff --git a/dataplane/Makefile b/dataplane/Makefile index 76a6443a..100e0d9f 100644 --- a/dataplane/Makefile +++ b/dataplane/Makefile @@ -26,3 +26,25 @@ build.image: load.image: build.image kind load docker-image $(IMAGE):$(TAG) --name $(KIND_CLUSTER) && \ kubectl -n blixt-system rollout restart daemonset blixt-dataplane + +.PHONY: build.bytecode.images +build.bytecode.images: build + docker build \ + --build-arg PROGRAM_NAME=blixt-tc-ingress \ + --build-arg BPF_FUNCTION_NAME=tc_ingress \ + --build-arg PROGRAM_TYPE=tc \ + --build-arg BYTECODE_FILENAME=loader \ + -f https://raw.githubusercontent.com/bpfd-dev/bpfd/main/packaging/container-deployment/Containerfile.bytecode \ + ./target/bpfel-unknown-none/debug -t quay.io/bpfd-bytecode/blixt-tc-ingress:latest + docker build \ + --build-arg PROGRAM_NAME=blixt-tc-egress \ + --build-arg BPF_FUNCTION_NAME=tc_egress \ + --build-arg PROGRAM_TYPE=tc \ + --build-arg BYTECODE_FILENAME=loader \ + -f https://raw.githubusercontent.com/bpfd-dev/bpfd/main/packaging/container-deployment/Containerfile.bytecode \ + ./target/bpfel-unknown-none/debug -t quay.io/bpfd-bytecode/blixt-tc-egress:latest + +.PHONY: push.bytecode.images +push.bytecode.images: build.bytecode.images + docker push quay.io/bpfd-bytecode/blixt-tc-egress:latest + docker push quay.io/bpfd-bytecode/blixt-tc-ingress:latest diff --git a/dataplane/api-server/src/backends.rs b/dataplane/api-server/src/backends.rs index 5571964e..25b70150 100644 --- a/dataplane/api-server/src/backends.rs +++ b/dataplane/api-server/src/backends.rs @@ -45,8 +45,8 @@ pub struct InterfaceIndexConfirmation { /// Generated client implementations. 
pub mod backends_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; use tonic::codegen::http::Uri; + use tonic::codegen::*; #[derive(Debug, Clone)] pub struct BackendsClient { inner: tonic::client::Grpc, @@ -90,9 +90,8 @@ pub mod backends_client { >::ResponseBody, >, >, - , - >>::Error: Into + Send + Sync, + >>::Error: + Into + Send + Sync, { BackendsClient::new(InterceptedService::new(inner, interceptor)) } @@ -130,23 +129,16 @@ pub mod backends_client { pub async fn get_interface_index( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/backends.backends/GetInterfaceIndex", - ); + let path = http::uri::PathAndQuery::from_static("/backends.backends/GetInterfaceIndex"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("backends.backends", "GetInterfaceIndex")); @@ -156,38 +148,34 @@ pub mod backends_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/backends.backends/Update"); let mut req = request.into_request(); - 
req.extensions_mut().insert(GrpcMethod::new("backends.backends", "Update")); + req.extensions_mut() + .insert(GrpcMethod::new("backends.backends", "Update")); self.inner.unary(req, path, codec).await } pub async fn delete( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/backends.backends/Delete"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("backends.backends", "Delete")); + req.extensions_mut() + .insert(GrpcMethod::new("backends.backends", "Delete")); self.inner.unary(req, path, codec).await } } @@ -202,10 +190,7 @@ pub mod backends_server { async fn get_interface_index( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn update( &self, request: tonic::Request, @@ -238,10 +223,7 @@ pub mod backends_server { max_encoding_message_size: None, } } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService where F: tonic::service::Interceptor, { @@ -297,21 +279,12 @@ pub mod backends_server { "/backends.backends/GetInterfaceIndex" => { #[allow(non_camel_case_types)] struct GetInterfaceIndexSvc(pub Arc); - impl tonic::server::UnaryService - for GetInterfaceIndexSvc { + impl tonic::server::UnaryService for GetInterfaceIndexSvc { type Response = super::InterfaceIndexConfirmation; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) 
-> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - (*inner).get_interface_index(request).await - }; + let fut = async move { (*inner).get_interface_index(request).await }; Box::pin(fut) } } @@ -341,13 +314,9 @@ pub mod backends_server { "/backends.backends/Update" => { #[allow(non_camel_case_types)] struct UpdateSvc(pub Arc); - impl tonic::server::UnaryService - for UpdateSvc { + impl tonic::server::UnaryService for UpdateSvc { type Response = super::Confirmation; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, @@ -383,17 +352,10 @@ pub mod backends_server { "/backends.backends/Delete" => { #[allow(non_camel_case_types)] struct DeleteSvc(pub Arc); - impl tonic::server::UnaryService - for DeleteSvc { + impl tonic::server::UnaryService for DeleteSvc { type Response = super::Confirmation; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { (*inner).delete(request).await }; Box::pin(fut) @@ -422,18 +384,14 @@ pub mod backends_server { }; Box::pin(fut) } - _ => { - Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) - }) - } + _ => Box::pin(async move { + Ok(http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap()) + }), } } } diff --git a/dataplane/ebpf/.cargo/config.toml b/dataplane/ebpf/.cargo/config.toml index 5d7e5915..997f47a2 100644 --- 
a/dataplane/ebpf/.cargo/config.toml +++ b/dataplane/ebpf/.cargo/config.toml @@ -1,6 +1,7 @@ [build] target-dir = "../target" target = "bpfel-unknown-none" +rustflags = ["-C", "save-temps"] [unstable] build-std = ["core"] \ No newline at end of file diff --git a/dataplane/ebpf/Cargo.toml b/dataplane/ebpf/Cargo.toml index 1805d00c..e162a6cd 100644 --- a/dataplane/ebpf/Cargo.toml +++ b/dataplane/ebpf/Cargo.toml @@ -16,7 +16,7 @@ path = "src/main.rs" [profile.dev] opt-level = 3 -debug = false +debug = true debug-assertions = false overflow-checks = false lto = true @@ -29,6 +29,7 @@ rpath = false lto = true panic = "abort" codegen-units = 1 +debug-assertions = false [workspace] members = [] diff --git a/dataplane/ebpf/src/main.rs b/dataplane/ebpf/src/main.rs index e16ec0ac..e4b3b8b4 100644 --- a/dataplane/ebpf/src/main.rs +++ b/dataplane/ebpf/src/main.rs @@ -126,5 +126,5 @@ fn try_tc_egress(ctx: TcContext) -> Result { #[panic_handler] fn panic(_info: &core::panic::PanicInfo) -> ! { - unsafe { core::hint::unreachable_unchecked() } + loop {} } diff --git a/dataplane/loader/src/main.rs b/dataplane/loader/src/main.rs index 6d044748..88db3e08 100644 --- a/dataplane/loader/src/main.rs +++ b/dataplane/loader/src/main.rs @@ -4,11 +4,11 @@ Copyright 2023 The Kubernetes Authors. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ -use std::net::Ipv4Addr; +use std::{net::Ipv4Addr, path::Path}; use anyhow::Context; use api_server::start as start_api_server; -use aya::maps::HashMap; +use aya::maps::{HashMap, Map, MapData}; use aya::programs::{tc, SchedClassifier, TcAttachType}; use aya::{include_bytes_aligned, Bpf}; use aya_log::BpfLogger; @@ -26,48 +26,73 @@ struct Opt { async fn main() -> Result<(), anyhow::Error> { let opt = Opt::parse(); + // TODO(astoycos) Let's determine a better way to let processes know bpfd is up and running, + // Maybe if we're not running as a privileged deployment ALWAYS wait for bpfd?. 
+ std::thread::sleep(std::time::Duration::from_secs(5)); env_logger::init(); - info!("loading ebpf programs"); - - #[cfg(debug_assertions)] - let mut bpf = Bpf::load(include_bytes_aligned!( - "../../target/bpfel-unknown-none/debug/loader" - ))?; - #[cfg(not(debug_assertions))] - let mut bpf = Bpf::load(include_bytes_aligned!( - "../../target/bpfel-unknown-none/release/loader" - ))?; - if let Err(e) = BpfLogger::init(&mut bpf) { - warn!("failed to initialize eBPF logger: {}", e); - } + // If bpfd loaded the programs just load the maps. + let bpfd_maps = Path::new("/run/bpfd/fs/maps"); + + if bpfd_maps.exists() { + info!("programs loaded via bpfd"); + let backends: HashMap<_, BackendKey, BackendList> = Map::HashMap( + MapData::from_pin(bpfd_maps.join("BACKENDS")).expect("no maps named BACKENDS"), + ) + .try_into()?; + + let gateway_indexes: HashMap<_, BackendKey, u16> = Map::HashMap( + MapData::from_pin(bpfd_maps.join("GATEWAY_INDEXES")) + .expect("no maps named GATEWAY_INDEXES"), + ) + .try_into()?; + + info!("starting api server"); + start_api_server(Ipv4Addr::new(0, 0, 0, 0), 9874, backends, gateway_indexes).await?; + } else { + info!("loading ebpf programs"); + + #[cfg(debug_assertions)] + let mut bpf = Bpf::load(include_bytes_aligned!( + "../../target/bpfel-unknown-none/debug/loader" + ))?; + #[cfg(not(debug_assertions))] + let mut bpf = Bpf::load(include_bytes_aligned!( + "../../target/bpfel-unknown-none/release/loader" + ))?; + if let Err(e) = BpfLogger::init(&mut bpf) { + warn!("failed to initialize eBPF logger: {}", e); + } + + info!("attaching tc_ingress program to {}", &opt.iface); - info!("attaching tc_ingress program to {}", &opt.iface); - - let _ = tc::qdisc_add_clsact(&opt.iface); - let ingress_program: &mut SchedClassifier = - bpf.program_mut("tc_ingress").unwrap().try_into()?; - ingress_program.load()?; - ingress_program - .attach(&opt.iface, TcAttachType::Ingress) - .context("failed to attach the ingress TC program")?; - - info!("attaching tc_egress 
program to {}", &opt.iface); - - let egress_program: &mut SchedClassifier = bpf.program_mut("tc_egress").unwrap().try_into()?; - egress_program.load()?; - egress_program - .attach(&opt.iface, TcAttachType::Egress) - .context("failed to attach the egress TC program")?; - - info!("starting api server"); - let backends: HashMap<_, BackendKey, BackendList> = - HashMap::try_from(bpf.take_map("BACKENDS").expect("no maps named BACKENDS"))?; - let gateway_indexes: HashMap<_, BackendKey, u16> = HashMap::try_from( - bpf.take_map("GATEWAY_INDEXES") - .expect("no maps named GATEWAY_INDEXES"), - )?; - start_api_server(Ipv4Addr::new(0, 0, 0, 0), 9874, backends, gateway_indexes).await?; + let _ = tc::qdisc_add_clsact(&opt.iface); + let ingress_program: &mut SchedClassifier = + bpf.program_mut("tc_ingress").unwrap().try_into()?; + ingress_program.load()?; + ingress_program + .attach(&opt.iface, TcAttachType::Ingress) + .context("failed to attach the ingress TC program")?; + + info!("attaching tc_egress program to {}", &opt.iface); + + let egress_program: &mut SchedClassifier = + bpf.program_mut("tc_egress").unwrap().try_into()?; + egress_program.load()?; + egress_program + .attach(&opt.iface, TcAttachType::Egress) + .context("failed to attach the egress TC program")?; + + info!("starting api server"); + let backends: HashMap<_, BackendKey, BackendList> = + HashMap::try_from(bpf.take_map("BACKENDS").expect("no maps named BACKENDS"))?; + let gateway_indexes: HashMap<_, BackendKey, u16> = HashMap::try_from( + bpf.take_map("GATEWAY_INDEXES") + .expect("no maps named GATEWAY_INDEXES"), + )?; + + start_api_server(Ipv4Addr::new(0, 0, 0, 0), 9874, backends, gateway_indexes).await?; + } info!("Exiting..."); diff --git a/test/integration/suite_test.go b/test/integration/suite_test.go index f25f816e..92c69f74 100644 --- a/test/integration/suite_test.go +++ b/test/integration/suite_test.go @@ -24,12 +24,15 @@ import ( "fmt" "os" "testing" + "time" 
"github.com/kong/kubernetes-testing-framework/pkg/clusters" "github.com/kong/kubernetes-testing-framework/pkg/clusters/addons/loadimage" "github.com/kong/kubernetes-testing-framework/pkg/clusters/addons/metallb" "github.com/kong/kubernetes-testing-framework/pkg/clusters/types/kind" "github.com/kong/kubernetes-testing-framework/pkg/environments" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/gateway-api/pkg/client/clientset/versioned" testutils "github.com/kong/blixt/internal/test/utils" @@ -50,13 +53,20 @@ var ( existingCluster = os.Getenv("BLIXT_USE_EXISTING_KIND_CLUSTER") keepTestCluster = func() bool { return os.Getenv("BLIXT_TEST_KEEP_CLUSTER") == "true" || existingCluster != "" }() keepKustomizeDeploys = func() bool { return os.Getenv("BLIXT_TEST_KEEP_KUSTOMIZE_DEPLOYS") == "true" }() + blixtUseBpfd = func() bool { return os.Getenv("BLIXT_USE_BPFD") == "true" && existingCluster == "" }() mainCleanupKey = "main" ) const ( - gwCRDsKustomize = "https://github.com/kubernetes-sigs/gateway-api/config/crd/experimental?ref=v0.8.1" - testKustomize = "../../config/tests/integration" + gwCRDsKustomize = "https://github.com/kubernetes-sigs/gateway-api/config/crd/experimental?ref=v0.8.1" + testKustomizeBpfdInstall = "../../config/bpfd-install" + testKustomize = "../../config/tests/integration" + testKustomizeBpfd = "../../config/tests/integration-bpfd" + bpfdNs = "bpfd" + bpfdConfigName = "bpfd-config" + bpfdOperatorName = "bpfd-operator" + bpfdDsName = "bpfd-daemon" ) func TestMain(m *testing.M) { @@ -112,6 +122,23 @@ func TestMain(m *testing.M) { gwclient, err = versioned.NewForConfig(env.Cluster().Config()) exitOnErr(err) + if blixtUseBpfd { + // deploy bpfd for blixt + fmt.Println("INFO: deploying bpfd") + exitOnErr(clusters.KustomizeDeployForCluster(ctx, env.Cluster(), testKustomizeBpfdInstall)) + if !keepKustomizeDeploys { + addCleanup(mainCleanupKey, func(context.Context) error { + cleanupLog("delete bpfd 
configmap to cleanup bpfd daemon") + env.Cluster().Client().CoreV1().ConfigMaps(bpfdNs).Delete(ctx, bpfdConfigName, metav1.DeleteOptions{}) + waitForBpfdConfigDelete(ctx, env) + cleanupLog("deleting bpfd namespace") + return env.Cluster().Client().CoreV1().Namespaces().Delete(ctx, bpfdNs, metav1.DeleteOptions{}) + }) + } + + exitOnErr(waitForBpfdReadiness(ctx, env)) + } + // deploy the Gateway API CRDs fmt.Println("INFO: deploying Gateway API CRDs") exitOnErr(clusters.KustomizeDeployForCluster(ctx, env.Cluster(), gwCRDsKustomize)) @@ -122,15 +149,28 @@ func TestMain(m *testing.M) { }) } - // deploy the blixt controlplane and dataplane, rbac permissions, e.t.c. - // this is what the tests will actually run against. - fmt.Println("INFO: deploying blixt via config/test kustomize") - exitOnErr(clusters.KustomizeDeployForCluster(ctx, env.Cluster(), testKustomize)) - if !keepKustomizeDeploys { - addCleanup(mainCleanupKey, func(context.Context) error { - cleanupLog("cleaning up blixt via config/test kustomize") - return clusters.KustomizeDeleteForCluster(ctx, env.Cluster(), testKustomize) - }) + if blixtUseBpfd { + // deploy the blixt controlplane and dataplane, rbac permissions, e.t.c. + // this is what the tests will actually run against. + fmt.Println("INFO: deploying blixt via config/test-bpfd kustomize") + exitOnErr(clusters.KustomizeDeployForCluster(ctx, env.Cluster(), testKustomizeBpfd)) + if !keepKustomizeDeploys { + addCleanup(mainCleanupKey, func(context.Context) error { + cleanupLog("cleaning up blixt via config/test-bpfd kustomize") + return clusters.KustomizeDeleteForCluster(ctx, env.Cluster(), testKustomizeBpfd) + }) + } + } else { + // deploy the blixt controlplane and dataplane, rbac permissions, e.t.c. + // this is what the tests will actually run against. 
+ fmt.Println("INFO: deploying blixt via config/test kustomize") + exitOnErr(clusters.KustomizeDeployForCluster(ctx, env.Cluster(), testKustomize)) + if !keepKustomizeDeploys { + addCleanup(mainCleanupKey, func(context.Context) error { + cleanupLog("cleaning up blixt via config/test kustomize") + return clusters.KustomizeDeleteForCluster(ctx, env.Cluster(), testKustomize) + }) + } } fmt.Println("INFO: waiting for Blixt component readiness") @@ -195,3 +235,72 @@ func runCleanup(cleanupKey string) (cleanupErr error) { delete(cleanup, cleanupKey) return } + +func waitForBpfdReadiness(ctx context.Context, env environments.Environment) error { + for { + time.Sleep(2 * time.Second) + select { + case <-ctx.Done(): + if err := ctx.Err(); err != nil { + return fmt.Errorf("context completed while waiting for components: %w", err) + } + return fmt.Errorf("context completed while waiting for components") + default: + fmt.Println("INFO: waiting for bpfd") + var controlplaneReady, dataplaneReady bool + + controlplane, err := env.Cluster().Client().AppsV1().Deployments(bpfdNs).Get(ctx, bpfdOperatorName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + fmt.Println("INFO: bpfd-operator dep not found yet") + continue + } + return err + } + if controlplane.Status.AvailableReplicas > 0 { + controlplaneReady = true + } + + dataplane, err := env.Cluster().Client().AppsV1().DaemonSets(bpfdNs).Get(ctx, bpfdDsName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + fmt.Println("INFO: bpfd daemon not found yet") + continue + } + return err + } + if dataplane.Status.NumberAvailable > 0 { + dataplaneReady = true + } + + if controlplaneReady && dataplaneReady { + fmt.Println("INFO: bpfd-operator is ready") + return nil + } + } + } +} + +func waitForBpfdConfigDelete(ctx context.Context, env environments.Environment) error { + for { + time.Sleep(2 * time.Second) + select { + case <-ctx.Done(): + if err := ctx.Err(); err != nil { + return 
fmt.Errorf("context completed while waiting for components: %w", err) + } + return fmt.Errorf("context completed while waiting for components") + default: + fmt.Println("INFO: waiting for bpfd config deletion") + + _, err := env.Cluster().Client().CoreV1().ConfigMaps(bpfdNs).Get(ctx, bpfdConfigName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + fmt.Println("INFO: bpfd configmap deleted successfully") + return nil + } + return err + } + } + } +}