Skip to content
This repository was archived by the owner on Nov 3, 2023. It is now read-only.

Commit

Permalink
Merge pull request #58 from dhiltgen/registry_example
Browse files Browse the repository at this point in the history
Fix ConfigMap glitches and doc registry caching
  • Loading branch information
dhiltgen authored Dec 3, 2020
2 parents efe365a + 4993b42 commit e7e337e
Show file tree
Hide file tree
Showing 11 changed files with 375 additions and 25 deletions.
6 changes: 6 additions & 0 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,12 @@ Assuming you have a valid kube configuration pointed at a cluster, you can run t
make integration
```

If you want to run a single suite of tests while working on a specific area of the tests or main code, use something like this:
```
make integration EXTRA_GO_TEST_FLAGS="-run TestConfigMapSuite -v"
```
Hint: find the current test suites with `grep "func Test" integration/suites/*.go`

To check your code for **lint/style consistency**, run
```
make lint
Expand Down
23 changes: 23 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,29 @@ kubectl create secret docker-registry mysecret --docker-server='<registry hostna
kubectl build --push --registry-secret mysecret -t <registry hostname>/<namespace>/<repo>:<tag> -f Dockerfile ./
```

### Registry-based Caching

BuildKit is smart about caching prior build results for efficient incremental
builds. This works great for a single-node scenario, but if you want to build
on a multi-node cluster, you can take advantage of BuildKit's ability to use a
registry for cache persistence. This can significantly improve
incremental build times regardless of which node in the cluster your build lands
on. For best performance, this registry should be "local" to the cluster. The
following examples demonstrate this pattern:

* [./examples/local-registry.yaml](./examples/local-registry.yaml) A kubernetes Deployment+Service to run a local registry (unauthenticated)
* [./examples/local-registry-buildkitd.toml](./examples/local-registry-buildkitd.toml) A BuildKit TOML configuration example for the above Registry that configures it for **"insecure" access**

To set up from the root of this tree:
```
kubectl apply -f ./examples/local-registry.yaml
kubectl buildkit create --config ./examples/local-registry-buildkitd.toml
```

You can then build using the registry cache with the command:
```
kubectl build -t myimage --cache-to=type=registry,ref=registry:5000/cache --cache-from=type=registry,ref=registry:5000/cache .
```

## Contributing

Expand Down
11 changes: 11 additions & 0 deletions examples/local-registry-buildkitd.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Example buildkitd.toml configuration for a local insecure registry
# Initialize buildkit with:
#
# kubectl buildkit create --config ./local-registry-buildkitd.toml
debug = false
# Use the containerd worker against the cluster's "k8s.io" namespace.
[worker.containerd]
namespace = "k8s.io"
# Allow plain-HTTP, certificate-less access to the in-cluster registry
# Service ("registry", port 5000) — presumably it serves no TLS; confirm
# against the Deployment in ./local-registry.yaml.
[registry."registry:5000"]
http = true
insecure = true

44 changes: 44 additions & 0 deletions examples/local-registry.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# Example for running a local registry in your cluster to use for caching purposes
#
# Note: this will not be visible to the underlying container runtime, so you won't
# be able to run images in the cluster from this registry, but you can use it
# as a cache for a multi-node cluster to speed up builds so every builder has access
# to the same cached content

# TODO explore a variant of this for Host networking, binding to localhost on port 5000
# and see if that's viable for a local dev registry pattern

# Deployment running a single-replica, unauthenticated registry used as a
# shared build cache for the cluster's BuildKit builders.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  labels:
    app: registry
spec:
  replicas: 1  # a cache registry does not need HA
  selector:
    matchLabels:
      app: registry
  template:
    metadata:
      labels:
        app: registry
    spec:
      containers:
        - name: registry
          # NOTE(review): image is unpinned (implicit :latest) — consider
          # pinning a specific registry release tag for reproducibility.
          image: docker.io/registry
          ports:
            - containerPort: 5000  # default registry listen port

---
# ClusterIP Service exposing the cache registry inside the cluster as
# "registry:5000" (the address referenced by local-registry-buildkitd.toml).
apiVersion: v1
kind: Service
metadata:
  name: registry
spec:
  type: ClusterIP  # cluster-internal only; not reachable from outside
  selector:
    app: registry
  ports:
    - protocol: TCP
      port: 5000
40 changes: 29 additions & 11 deletions integration/common/basesuites.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,31 +3,45 @@
package common

import (
"context"
"fmt"
"path"
"strings"

"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)

// BaseSuite provides shared setup/teardown scaffolding for the builder
// integration test suites.
type BaseSuite struct {
	suite.Suite
	Name            string   // builder instance name passed to create/rm
	CreateFlags     []string // extra flags appended to the "create" invocation
	SkipSetupCreate bool     // when true, SetupTest does not create a builder

	ClientSet *kubernetes.Clientset // kube client resolved during SetupTest
	Namespace string                // namespace selected by the kube config
}

// SetupTest creates the builder for this suite (unless SkipSetupCreate is
// set) and resolves the kube clientset and namespace used by the tests.
func (s *BaseSuite) SetupTest() {
	var err error
	if !s.SkipSetupCreate {
		logrus.Infof("%s: Setting up builder", s.Name)
		args := append(
			[]string{
				s.Name,
			},
			s.CreateFlags...,
		)
		// Assign with "=" (not ":=") so the outer err is not shadowed.
		err = RunBuildkit("create", args)
		require.NoError(s.T(), err, "%s: builder create failed", s.Name)
	}

	s.ClientSet, s.Namespace, err = GetKubeClientset()
	require.NoError(s.T(), err, "%s: kube client failed", s.Name)
}

func (s *BaseSuite) TearDownTest() {
Expand All @@ -36,6 +50,10 @@ func (s *BaseSuite) TearDownTest() {
s.Name,
})
require.NoError(s.T(), err, "%s: builder rm failed", s.Name)
configMapClient := s.ClientSet.CoreV1().ConfigMaps(s.Namespace)
_, err = configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{})
require.Error(s.T(), err, "config map wasn't cleaned up")
require.Contains(s.T(), err.Error(), "not found")
}

func (s *BaseSuite) TestSimpleBuild() {
Expand Down
24 changes: 24 additions & 0 deletions integration/common/kubeclient.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
// Copyright (C) 2020 VMware, Inc.
// SPDX-License-Identifier: Apache-2.0
package common

import (
	"fmt"

	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/client-go/kubernetes"
)

// GetKubeClientset retrieves a Kubernetes clientset along with the namespace
// selected by the active kube configuration (flags, environment, kubeconfig).
// Errors are wrapped with the step that failed for easier diagnosis.
func GetKubeClientset() (*kubernetes.Clientset, string, error) {
	configFlags := genericclioptions.NewConfigFlags(true)
	clientConfig := configFlags.ToRawKubeConfigLoader()
	ns, _, err := clientConfig.Namespace()
	if err != nil {
		return nil, "", fmt.Errorf("resolving namespace: %w", err)
	}
	restClientConfig, err := clientConfig.ClientConfig()
	if err != nil {
		return nil, "", fmt.Errorf("loading REST client config: %w", err)
	}
	clientset, err := kubernetes.NewForConfig(restClientConfig)
	if err != nil {
		// Return an explicit error rather than a partially valid tuple.
		return nil, "", fmt.Errorf("creating clientset: %w", err)
	}
	return clientset, ns, nil
}
Loading

0 comments on commit e7e337e

Please sign in to comment.