From 9438b36c8e33a7d885cdc1961d9936ec20e376ec Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen
Date: Tue, 1 Dec 2020 11:29:39 -0800
Subject: [PATCH] Fix ConfigMap glitches and doc registry caching

This fixes a few corner cases in how we handle ConfigMaps for the
buildkitd toml config file. It adds integration coverage for those
scenarios.

It also adds a brief example showing how to configure a local registry
for caching purposes to speed up incremental builds on a multi-node
cluster.
---
 CONTRIBUTING.md                        |   5 +
 README.md                              |  23 +++
 examples/local-registry-buildkitd.toml |  11 ++
 examples/local-registry.yaml           |  44 ++++++
 integration/common/basesuites.go       |  40 +++--
 integration/common/kubeclient.go       |  24 +++
 integration/suites/configmap_test.go   | 207 +++++++++++++++++++++++++
 integration/suites/default_test.go     |   6 +-
 pkg/driver/kubernetes/driver.go        |  21 ++-
 pkg/driver/kubernetes/factory.go       |   1 +
 10 files changed, 362 insertions(+), 20 deletions(-)
 create mode 100644 examples/local-registry-buildkitd.toml
 create mode 100644 examples/local-registry.yaml
 create mode 100644 integration/common/kubeclient.go
 create mode 100644 integration/suites/configmap_test.go

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a5a4ceff..a2b5b490 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -35,6 +35,11 @@ Assuming you have a valid kube configuration pointed at a cluster, you can run t
 make integration
 ```
 
+If you want to run a single suite of tests while working on a specific area of the tests or main code, use something like this:
+```
+make integration EXTRA_GO_TEST_FLAGS="-run TestConfigMapSuite -v"
+```
+
 To check your code for **lint/style consistency**, run
 ```
 make lint
diff --git a/README.md b/README.md
index 43e0c5f7..e2a8595f 100644
--- a/README.md
+++ b/README.md
@@ -98,6 +98,29 @@ kubectl create secret docker-registry mysecret --docker-server='//:
 -f Dockerfile ./
 ```
 
+### Registry-based Caching
+
+BuildKit is smart about caching prior build results for efficient incremental
+builds. This works great for a single-node scenario, but if you want to build
+on a multi-node cluster, you can take advantage of BuildKit's ability to use a
+registry for cache persistence. This can significantly improve incremental
+build times regardless of which node in the cluster your build lands on. For
+best performance, this registry should be "local" to the cluster. The
+following examples demonstrate this pattern:
+
+* [./examples/local-registry.yaml](./examples/local-registry.yaml) A Kubernetes Deployment + Service that runs a local, unauthenticated registry
+* [./examples/local-registry-buildkitd.toml](./examples/local-registry-buildkitd.toml) A BuildKit TOML configuration example that configures the builder to reach the above registry over **"insecure" (plain HTTP) access**
+
+To set up, from the root of this tree:
+```
+kubectl apply -f ./examples/local-registry.yaml
+kubectl buildkit create --config ./examples/local-registry-buildkitd.toml
+```
+
+You can then build using the registry cache with something like:
+```
+kubectl build -t myimage --cache-to=type=registry,ref=registry:5000/cache --cache-from=type=registry,ref=registry:5000/cache .
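+# Illustrative follow-up (not part of the required setup): once the cache has
+# been populated, a build on any other node can reuse it by passing only
+# --cache-from, e.g.
+#   kubectl build -t myimage --cache-from=type=registry,ref=registry:5000/cache .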
+``` ## Contributing diff --git a/examples/local-registry-buildkitd.toml b/examples/local-registry-buildkitd.toml new file mode 100644 index 00000000..79112f7b --- /dev/null +++ b/examples/local-registry-buildkitd.toml @@ -0,0 +1,11 @@ +# Example buildkitd.toml configuration for a local insecure registry +# Initialize buildkit with: +# +# kubectl buildkit create --config ./local-registry-buildkitd.toml +debug = false +[worker.containerd] + namespace = "k8s.io" +[registry."registry:5000"] + http = true + insecure = true + diff --git a/examples/local-registry.yaml b/examples/local-registry.yaml new file mode 100644 index 00000000..db0566a8 --- /dev/null +++ b/examples/local-registry.yaml @@ -0,0 +1,44 @@ +# Example for running a local registry in your cluster to use for caching purposes +# +# Note: this will not be visible to the underlying container runtime, so you won't +# be able to run images in the cluster from this registry, but you can use it +# as a cache for a multi-node cluster to speed up builds so every builder has access +# to the same cached content + +# TODO explore a variant of this for Host networking, binding to localhost on port 5000 +# and see if that's viable for a local dev registry pattern + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: registry + labels: + app: registry +spec: + replicas: 1 + selector: + matchLabels: + app: registry + template: + metadata: + labels: + app: registry + spec: + containers: + - name: registry + image: docker.io/registry + ports: + - containerPort: 5000 + +--- +apiVersion: v1 +kind: Service +metadata: + name: registry +spec: + type: ClusterIP + selector: + app: registry + ports: + - protocol: TCP + port: 5000 diff --git a/integration/common/basesuites.go b/integration/common/basesuites.go index ce44ceb2..00e6b478 100644 --- a/integration/common/basesuites.go +++ b/integration/common/basesuites.go @@ -3,6 +3,7 @@ package common import ( + "context" "fmt" "path" "strings" @@ -10,24 +11,37 @@ import ( "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" ) type BaseSuite struct { suite.Suite - Name string - CreateFlags []string + Name string + CreateFlags []string + SkipSetupCreate bool + + ClientSet *kubernetes.Clientset + Namespace string } func (s *BaseSuite) SetupTest() { - logrus.Infof("%s: Setting up builder", s.Name) - args := append( - []string{ - s.Name, - }, - s.CreateFlags..., - ) - err := RunBuildkit("create", args) - require.NoError(s.T(), err, "%s: builder create failed", s.Name) + var err error + if !s.SkipSetupCreate { + logrus.Infof("%s: Setting up builder", s.Name) + args := append( + []string{ + s.Name, + }, + s.CreateFlags..., + ) + err := RunBuildkit("create", args) + require.NoError(s.T(), err, "%s: builder create failed", s.Name) + } + + s.ClientSet, s.Namespace, err = GetKubeClientset() + require.NoError(s.T(), err, "%s: kube client failed", s.Name) } func (s *BaseSuite) TearDownTest() { @@ -36,6 +50,10 @@ func (s *BaseSuite) TearDownTest() { s.Name, }) require.NoError(s.T(), err, "%s: builder rm failed", s.Name) + configMapClient := s.ClientSet.CoreV1().ConfigMaps(s.Namespace) + _, err = configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{}) + require.Error(s.T(), err, "config map wasn't cleaned up") + require.Contains(s.T(), err.Error(), "not found") } func (s *BaseSuite) TestSimpleBuild() { diff --git a/integration/common/kubeclient.go 
b/integration/common/kubeclient.go new file mode 100644 index 00000000..3a252359 --- /dev/null +++ b/integration/common/kubeclient.go @@ -0,0 +1,24 @@ +// Copyright (C) 2020 VMware, Inc. +// SPDX-License-Identifier: Apache-2.0 +package common + +import ( + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/kubernetes" +) + +// GetKubeClientset retrieves the clientset and namespace +func GetKubeClientset() (*kubernetes.Clientset, string, error) { + configFlags := genericclioptions.NewConfigFlags(true) + clientConfig := configFlags.ToRawKubeConfigLoader() + ns, _, err := clientConfig.Namespace() + if err != nil { + return nil, "", err + } + restClientConfig, err := clientConfig.ClientConfig() + if err != nil { + return nil, "", err + } + clientset, err := kubernetes.NewForConfig(restClientConfig) + return clientset, ns, err +} diff --git a/integration/suites/configmap_test.go b/integration/suites/configmap_test.go new file mode 100644 index 00000000..4359b3d0 --- /dev/null +++ b/integration/suites/configmap_test.go @@ -0,0 +1,207 @@ +// Copyright (C) 2020 VMware, Inc. +// SPDX-License-Identifier: Apache-2.0 +package suites + +import ( + "context" + "path/filepath" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/vmware-tanzu/buildkit-cli-for-kubectl/integration/common" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" +) + +type configMapSuite struct { + suite.Suite + Name string + CreateFlags []string + + ClientSet *kubernetes.Clientset + Namespace string + + configMapClient v1.ConfigMapInterface +} + +func (s *configMapSuite) SetupTest() { + var err error + s.ClientSet, s.Namespace, err = common.GetKubeClientset() + require.NoError(s.T(), err, "%s: kube client failed", s.Name) + s.configMapClient = s.ClientSet.CoreV1().ConfigMaps(s.Namespace) +} + +func (s *configMapSuite) getConfigMap() *corev1.ConfigMap { + payload := `# pre-existing configuration +# nothing to see here... 
+` + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: s.Namespace, + Name: s.Name, + }, + BinaryData: map[string][]byte{ + "buildkitd.toml": []byte(payload), + }, + } +} + +func (s *configMapSuite) TestDefaultCreate() { + logrus.Infof("%s: Creating builder with default config", s.Name) + args := append( + []string{ + s.Name, + }, + s.CreateFlags..., + ) + err := common.RunBuildkit("create", args) + require.NoError(s.T(), err, "%s: builder create failed", s.Name) + cfg, err := s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{}) + require.NoError(s.T(), err, "%s: fetch configmap failed", s.Name) + data, ok := cfg.BinaryData["buildkitd.toml"] + require.True(s.T(), ok, "missing buildkitd.toml: %#v", cfg.BinaryData) + // Spot check an expected string + require.Contains(s.T(), string(data), "Default buildkitd configuration.") + + // Tear down the builder + logrus.Infof("%s: Removing builder", s.Name) + err = common.RunBuildkit("rm", []string{ + s.Name, + }) + require.NoError(s.T(), err, "%s: builder rm failed", s.Name) + _, err = s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{}) + require.Error(s.T(), err, "config map wasn't cleaned up") + require.Contains(s.T(), err.Error(), "not found") +} + +// Pre-create a config and make sure it does not get overridden by the default creation flow +func (s *configMapSuite) TestPreExistingConfigDefaultCreate() { + logrus.Infof("%s: Creating pre-existing config", s.Name) + _, err := s.configMapClient.Create(context.Background(), s.getConfigMap(), metav1.CreateOptions{}) + require.NoError(s.T(), err, "%s: pre-existing configmap create failed", s.Name) + + logrus.Infof("%s: Creating builder with default config", s.Name) + args := append( + []string{ + s.Name, + }, + s.CreateFlags..., + ) + err = common.RunBuildkit("create", args) + require.NoError(s.T(), err, "%s: builder create failed", s.Name) + cfg, err := s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{}) + require.NoError(s.T(), err, "%s: fetch configmap failed", s.Name) + data, ok := cfg.BinaryData["buildkitd.toml"] + require.True(s.T(), ok, "missing buildkitd.toml: %#v", cfg.BinaryData) + // Spot check an expected string doesn't exist + require.NotContains(s.T(), string(data), "Default buildkitd configuration.") + + // Tear down the builder + logrus.Infof("%s: Removing builder", s.Name) + err = common.RunBuildkit("rm", []string{ + s.Name, + }) + require.NoError(s.T(), err, "%s: builder rm failed", s.Name) + _, err = s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{}) + // TODO if we preserve pre-existing configmaps this will need to be refined. 
+ require.Error(s.T(), err, "config map wasn't cleaned up") + require.Contains(s.T(), err.Error(), "not found") +} + +func (s *configMapSuite) TestCustomCreate() { + logrus.Infof("%s: Creating builder with custom config", s.Name) + dir, cleanup, err := common.NewBuildContext(map[string]string{ + "buildkitd.toml": `# Custom config file +# nothing to see here 2 +`}) + require.NoError(s.T(), err, "%s: config file creation", s.Name) + + defer cleanup() + + args := append( + []string{ + "--config", filepath.Join(dir, "buildkitd.toml"), + s.Name, + }, + s.CreateFlags..., + ) + err = common.RunBuildkit("create", args) + require.NoError(s.T(), err, "%s: builder create failed", s.Name) + cfg, err := s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{}) + require.NoError(s.T(), err, "%s: fetch configmap failed", s.Name) + data, ok := cfg.BinaryData["buildkitd.toml"] + require.True(s.T(), ok, "missing buildkitd.toml: %#v", cfg.BinaryData) + // Spot check an expected string + require.NotContains(s.T(), string(data), "Default buildkitd configuration.", string(data)) + require.Contains(s.T(), string(data), "Custom config file", string(data)) + + // Tear down the builder + logrus.Infof("%s: Removing builder", s.Name) + err = common.RunBuildkit("rm", []string{ + s.Name, + }) + require.NoError(s.T(), err, "%s: builder rm failed", s.Name) + _, err = s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{}) + require.Error(s.T(), err, "config map wasn't cleaned up") + require.Contains(s.T(), err.Error(), "not found") +} +func (s *configMapSuite) TestPreExistingWithCustomCreate() { + logrus.Infof("%s: Creating pre-existing config", s.Name) + _, err := s.configMapClient.Create(context.Background(), s.getConfigMap(), metav1.CreateOptions{}) + require.NoError(s.T(), err, "%s: pre-existing configmap create failed", s.Name) + + logrus.Infof("%s: Creating builder with custom config", s.Name) + dir, cleanup, err := common.NewBuildContext(map[string]string{ + "buildkitd.toml": `# Custom config file +# nothing to see here 2 +`}) + require.NoError(s.T(), err, "%s: config file create failed", s.Name) + + defer cleanup() + + args := append( + []string{ + "--config", filepath.Join(dir, "buildkitd.toml"), + s.Name, + }, + s.CreateFlags..., + ) + err = common.RunBuildkit("create", args) + require.NoError(s.T(), err, "%s: builder create failed", s.Name) + cfg, err := s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{}) + require.NoError(s.T(), err, "%s: fetch configmap failed", s.Name) + data, ok := cfg.BinaryData["buildkitd.toml"] + require.True(s.T(), ok, "missing buildkitd.toml: %#v", cfg.BinaryData) + // Spot check expected strings + require.NotContains(s.T(), string(data), "Default buildkitd configuration.", string(data)) + require.NotContains(s.T(), string(data), "pre-existing configuration", string(data)) + require.Contains(s.T(), string(data), "Custom config file", string(data)) + + // Tear down the builder + logrus.Infof("%s: Removing builder", s.Name) + err = common.RunBuildkit("rm", []string{ + s.Name, + }) + require.NoError(s.T(), err, "%s: builder rm failed", s.Name) + _, err = s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{}) + require.Error(s.T(), err, "config map wasn't cleaned up") + require.Contains(s.T(), err.Error(), "not found") +} + +func TestConfigMapSuite(t *testing.T) { + common.Skipper(t) + //t.Parallel() // TODO - tests fail if run in parallel, may be actual race bug + suite.Run(t, &configMapSuite{ + Name: "configmaptest", 
+ }) +} diff --git a/integration/suites/default_test.go b/integration/suites/default_test.go index ae40d943..d6dc0b65 100644 --- a/integration/suites/default_test.go +++ b/integration/suites/default_test.go @@ -12,16 +12,14 @@ import ( type DefaultSuite struct{ common.BaseSuite } -func (s *DefaultSuite) SetupTest() { - // For the "default" scenario, we rely on the initial test case to establish the builder with defaults -} - func TestDefaultSuite(t *testing.T) { common.Skipper(t) //t.Parallel() // TODO - tests fail if run in parallel, may be actual race bug suite.Run(t, &DefaultSuite{ BaseSuite: common.BaseSuite{ Name: "buildkit", // TODO pull this from the actual default name + // For the "default" scenario, we rely on the initial test case to establish the builder with defaults + SkipSetupCreate: true, }, }) } diff --git a/pkg/driver/kubernetes/driver.go b/pkg/driver/kubernetes/driver.go index a46b9f14..cea3bbec 100644 --- a/pkg/driver/kubernetes/driver.go +++ b/pkg/driver/kubernetes/driver.go @@ -48,7 +48,8 @@ const ( DefaultContainerRuntime = "docker" // Temporary since most kubernetes clusters are still Docker based today... // TODO - consider adding other default values here to aid users in fine-tuning by editing the configmap post deployment - DefaultConfigFileTemplate = `debug = false + DefaultConfigFileTemplate = `# Default buildkitd configuration. Use --config to override during create +debug = false [worker.containerd] namespace = "{{ .ContainerdNamespace }}" ` @@ -69,6 +70,7 @@ type Driver struct { podChooser podchooser.PodChooser eventClient clientcorev1.EventInterface userSpecifiedRuntime bool + userSpecifiedConfig bool namespace string loadbalance string authHintMessage string @@ -76,14 +78,19 @@ type Driver struct { func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error { return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error { - _, err := d.configMapClient.Get(ctx, d.configMap.Name, metav1.GetOptions{}) + if err != nil { + // Doesn't exist, create it _, err = d.configMapClient.Create(ctx, d.configMap, metav1.CreateOptions{}) - if err != nil { - return errors.Wrapf(err, "error while calling configMap.Create for %q", d.configMap.Name) - } + } else if d.userSpecifiedConfig { + // err was nil, thus it already exists, and user passed a new config, so update it + _, err = d.configMapClient.Update(ctx, d.configMap, metav1.UpdateOptions{}) } + if err != nil { + return errors.Wrapf(err, "configmap error for buildkitd.toml for %q", d.configMap.Name) + } + _, err = d.deploymentClient.Get(ctx, d.deployment.Name, metav1.GetOptions{}) if err != nil { // TODO: return err if err != ErrNotFound @@ -360,6 +367,10 @@ func (d *Driver) Rm(ctx context.Context, force bool) error { if err := d.deploymentClient.Delete(ctx, d.deployment.Name, metav1.DeleteOptions{}); err != nil { return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name) } + // TODO - consider checking for our expected labels and preserve pre-existing ConfigMaps + if err := d.configMapClient.Delete(ctx, d.configMap.Name, metav1.DeleteOptions{}); err != nil { + return errors.Wrapf(err, "error while calling configMapClient.Delete for %q", d.configMap.Name) + } return nil } diff --git a/pkg/driver/kubernetes/factory.go b/pkg/driver/kubernetes/factory.go index dd590e06..949fed9e 100644 --- a/pkg/driver/kubernetes/factory.go +++ b/pkg/driver/kubernetes/factory.go @@ -233,6 +233,7 @@ func (d *Driver) initDriverFromConfig() error { // and make sure 
 		// and make sure things get wired up properly, and/or error out if the
 		// user tries to set properties that should be in the config file
 		d.configMap = manifest.NewConfigMap(deploymentOpt, data)
+		d.userSpecifiedConfig = true
 	}
 	return nil
 }
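Reviewer note (not part of the patch): the Bootstrap hunk above boils down to a get-then-create-or-update flow for the buildkitd.toml ConfigMap. The standalone Go sketch below restates that flow outside the diff. The package name, the ensureConfigMap helper, and its parameters are hypothetical, and it checks IsNotFound explicitly where the patch treats any Get error as "missing".

// sketch.go - minimal, self-contained illustration of the ConfigMap handling
// introduced in driver.go's Bootstrap. Assumes a client-go clientset.
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureConfigMap creates the desired ConfigMap when it is absent, overwrites it
// only when the user explicitly supplied --config (userSpecifiedConfig), and
// leaves a pre-existing ConfigMap untouched otherwise.
func ensureConfigMap(ctx context.Context, client kubernetes.Interface, ns string, desired *corev1.ConfigMap, userSpecifiedConfig bool) error {
	cmClient := client.CoreV1().ConfigMaps(ns)
	existing, err := cmClient.Get(ctx, desired.Name, metav1.GetOptions{})
	switch {
	case apierrors.IsNotFound(err):
		// Not there yet: create it (default or user-supplied contents).
		_, err = cmClient.Create(ctx, desired, metav1.CreateOptions{})
	case err == nil && userSpecifiedConfig:
		// Already exists and the user passed a new config: replace the contents.
		desired.ResourceVersion = existing.ResourceVersion
		_, err = cmClient.Update(ctx, desired, metav1.UpdateOptions{})
	}
	// err == nil && !userSpecifiedConfig falls through: keep the existing ConfigMap.
	return err
}

In the driver itself, the desired ConfigMap comes either from DefaultConfigFileTemplate or from the --config file (factory.go sets userSpecifiedConfig for the latter), and Rm deletes it again when the builder is removed.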