diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index 5acf9246ca..8d5077ce58 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -223,6 +223,7 @@ func addKanikoOptionsFlags() { RootCmd.PersistentFlags().BoolVarP(&opts.CacheCopyLayers, "cache-copy-layers", "", false, "Caches copy layers") RootCmd.PersistentFlags().VarP(&opts.IgnorePaths, "ignore-path", "", "Ignore these paths when taking a snapshot. Set it repeatedly for multiple paths.") RootCmd.PersistentFlags().BoolVarP(&opts.ForceBuildMetadata, "force-build-metadata", "", false, "Force add metadata layers to build image") + RootCmd.PersistentFlags().StringVarP(&opts.Isolation, "isolation", "", "chroot", "Which isolation method to use (possible implementation: chroot, none)") // Allow setting --registry-mirror using an environment variable. if val, ok := os.LookupEnv("KANIKO_REGISTRY_MIRROR"); ok { diff --git a/deploy/Dockerfile b/deploy/Dockerfile index 2e760585cb..d36a9d765d 100644 --- a/deploy/Dockerfile +++ b/deploy/Dockerfile @@ -44,10 +44,37 @@ RUN \ FROM debian:bullseye-slim AS certs RUN apt update && apt install -y ca-certificates +FROM debian:bullseye-slim AS passwd +RUN echo "root:x:0:" > /etc/group +RUN echo "root:x:0:0:root:/root:/sbin/nologin" > /etc/passwd + +# idmap runnable without --privileged (but still requires seccomp=unconfined apparmor=unconfined) +FROM alpine AS idmap +ARG SHADOW_VERSION="v4.11.1" +ARG PATCHELF_VERSION="0.15.0" +ENV DEBIAN_FRONTEND=noninteractive +RUN apk add --no-cache automake autoconf gettext git build-base libcap-dev libtool make gettext gettext-dev linux-pam-dev expect byacc +RUN git clone https://github.com/shadow-maint/shadow.git /shadow +WORKDIR /shadow +RUN git fetch && git checkout $SHADOW_VERSION +RUN ./autogen.sh --disable-nls --disable-man --without-audit --without-selinux --without-acl --without-attr --without-tcb --without-nscd \ + --disable-shared --enable-static=yes +RUN make +# set setgid and setuid filemode bits on 
new{gid,uid}map +RUN chmod g+s src/newgidmap && chmod u+s src/newuidmap + +RUN wget -O patchelf.tar.gz https://github.com/NixOS/patchelf/releases/download/${PATCHELF_VERSION}/patchelf-${PATCHELF_VERSION}-$(uname -m).tar.gz && \ + tar xf patchelf.tar.gz && \ + cp bin/patchelf /bin + +# use patchelf to modify the library path so lib musl can reside in /kaniko/lib dir +RUN patchelf --set-interpreter /kaniko/lib/ld-musl-$(uname -m).so.1 src/newuidmap +RUN patchelf --set-interpreter /kaniko/lib/ld-musl-$(uname -m).so.1 src/newgidmap + FROM scratch # Create kaniko directory with world write permission to allow non root run # use musl busybox since it's staticly compiled -RUN --mount=from=busybox:musl,dst=/usr/ ["busybox", "sh", "-c", "mkdir -p /kaniko && chmod 777 /kaniko"] +RUN --mount=from=busybox:musl,dst=/usr/ ["busybox", "sh", "-c", "mkdir -p /kaniko /root /etc && chmod 777 /kaniko"] COPY --from=0 /src/out/executor /kaniko/executor COPY --from=0 /usr/local/bin/docker-credential-gcr /kaniko/docker-credential-gcr @@ -56,8 +83,12 @@ COPY --from=0 /usr/local/bin/docker-credential-acr-env /kaniko/docker-credential COPY --from=certs /etc/ssl/certs/ca-certificates.crt /kaniko/ssl/certs/ COPY --from=0 /kaniko/.docker /kaniko/.docker COPY files/nsswitch.conf /etc/nsswitch.conf -ENV HOME /root -ENV USER root +COPY --from=passwd /etc/passwd /etc/group /etc/ +COPY --from=idmap /shadow/src/newuidmap /shadow/src/newgidmap /kaniko/ +# shadowutils is not a static binary, need to use musl libc +COPY --from=idmap /lib/ld-musl-*.so.1 /lib/libc.musl-*.so.1 /kaniko/lib/ + +USER root ENV PATH /usr/local/bin:/kaniko ENV SSL_CERT_DIR=/kaniko/ssl/certs ENV DOCKER_CONFIG /kaniko/.docker/ diff --git a/deploy/Dockerfile_debug b/deploy/Dockerfile_debug index 12c614d9f8..67e890a520 100644 --- a/deploy/Dockerfile_debug +++ b/deploy/Dockerfile_debug @@ -45,11 +45,40 @@ RUN \ FROM debian:bullseye-slim AS certs RUN apt update && apt install -y ca-certificates + +FROM debian:bullseye-slim AS passwd 
+RUN echo "root:x:0:" > /etc/group +RUN echo "root:x:0:0:root:/root:/sbin/nologin" > /etc/passwd + +# idmap runnable without --privileged (but still requires seccomp=unconfined apparmor=unconfined) +FROM alpine AS idmap +ARG SHADOW_VERSION="v4.11.1" +ARG PATCHELF_VERSION="0.15.0" +ENV DEBIAN_FRONTEND=noninteractive +RUN apk add --no-cache automake autoconf gettext git build-base libcap-dev libtool make gettext gettext-dev linux-pam-dev expect byacc +RUN git clone https://github.com/shadow-maint/shadow.git /shadow +WORKDIR /shadow +RUN git fetch && git checkout $SHADOW_VERSION +RUN ./autogen.sh --disable-nls --disable-man --without-audit --without-selinux --without-acl --without-attr --without-tcb --without-nscd \ + --disable-shared --enable-static=yes +RUN make +# set setgid and setuid filemode bits on new{gid,uid}map +RUN chmod g+s src/newgidmap && chmod u+s src/newuidmap + +RUN wget -O patchelf.tar.gz https://github.com/NixOS/patchelf/releases/download/${PATCHELF_VERSION}/patchelf-${PATCHELF_VERSION}-$(uname -m).tar.gz && \ + tar xf patchelf.tar.gz && \ + cp bin/patchelf /bin + +# use patchelf to modify the library path so lib musl can reside in /kaniko/lib dir +RUN patchelf --set-interpreter /kaniko/lib/ld-musl-$(uname -m).so.1 src/newuidmap +RUN patchelf --set-interpreter /kaniko/lib/ld-musl-$(uname -m).so.1 src/newgidmap + + # use musl busybox since it's staticly compiled on all platforms FROM busybox:musl as busybox FROM scratch # Create kaniko directory with world write permission to allow non root run -RUN --mount=from=busybox,dst=/usr/ ["busybox", "sh", "-c", "mkdir -p /kaniko && chmod 777 /kaniko"] +RUN --mount=from=busybox,dst=/usr/ ["busybox", "sh", "-c", "mkdir -p /kaniko /root && chmod 777 /kaniko"] COPY --from=0 /src/out/executor /kaniko/executor COPY --from=0 /src/out/warmer /kaniko/warmer @@ -63,8 +92,12 @@ VOLUME /busybox COPY --from=certs /etc/ssl/certs/ca-certificates.crt /kaniko/ssl/certs/ COPY --from=0 /kaniko/.docker /kaniko/.docker COPY 
files/nsswitch.conf /etc/nsswitch.conf -ENV HOME /root -ENV USER root +COPY --from=passwd /etc/passwd /etc/group /etc/ +COPY --from=idmap /shadow/src/newuidmap /shadow/src/newgidmap /kaniko/ +# shadowutils is not a static binary, need to use musl libc +COPY --from=idmap /lib/ld-musl-*.so.1 /lib/libc.musl-*.so.1 /kaniko/lib/ + +USER root ENV PATH /usr/local/bin:/kaniko:/busybox ENV SSL_CERT_DIR=/kaniko/ssl/certs ENV DOCKER_CONFIG /kaniko/.docker/ diff --git a/go.mod b/go.mod index 3e37f476a2..5c448b63c2 100644 --- a/go.mod +++ b/go.mod @@ -118,7 +118,7 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/moby/locker v1.0.1 // indirect - github.com/moby/sys/mount v0.3.0 // indirect + github.com/moby/sys/mount v0.3.0 github.com/moby/sys/mountinfo v0.5.0 // indirect github.com/moby/sys/symlink v0.2.0 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect diff --git a/integration/dockerfiles-isolation/Dockerfile_test_chroot b/integration/dockerfiles-isolation/Dockerfile_test_chroot new file mode 100644 index 0000000000..1953036324 --- /dev/null +++ b/integration/dockerfiles-isolation/Dockerfile_test_chroot @@ -0,0 +1,3 @@ +FROM alpine + +RUN ls -al diff --git a/integration/dockerfiles/Dockerfile_test_user_nonexisting b/integration/dockerfiles/Dockerfile_test_user_nonexisting index 8b65af54ac..7cce0c2e49 100644 --- a/integration/dockerfiles/Dockerfile_test_user_nonexisting +++ b/integration/dockerfiles/Dockerfile_test_user_nonexisting @@ -14,6 +14,7 @@ FROM debian:9.11 USER 1001:1001 +RUN ["echo", "hello"] RUN echo "hey2" > /tmp/foo USER 1001 RUN echo "hello" > /tmp/foobar diff --git a/integration/images.go b/integration/images.go index 5b68310aa7..594d035ecd 100644 --- a/integration/images.go +++ b/integration/images.go @@ -238,6 +238,17 @@ func addServiceAccountFlags(flags []string, serviceAccount string) []string { return 
flags } +// addSecurityFlags adds seccomp and apparmor profile disable and adds cap SYS_ADMIN +func addSecurityFlags(flags []string) []string { + return append(flags, []string{ + // chroot needs CAP_SYS_ADMIN + "--cap-add", "SYS_ADMIN", + // disable apparmor and seccomp because it permits mounts + "--security-opt", "apparmor=unconfined", + "--security-opt", "seccomp=unconfined", + }...) +} + func (d *DockerFileBuilder) BuildDockerImage(t *testing.T, imageRepo, dockerfilesPath, dockerfile, contextDir string) error { t.Logf("Building image for Dockerfile %s\n", dockerfile) @@ -283,7 +294,6 @@ func (d *DockerFileBuilder) BuildDockerImage(t *testing.T, imageRepo, dockerfile func (d *DockerFileBuilder) BuildImage(t *testing.T, config *integrationTestConfig, dockerfilesPath, dockerfile string) error { _, ex, _, _ := runtime.Caller(0) cwd := filepath.Dir(ex) - return d.BuildImageWithContext(t, config, dockerfilesPath, dockerfile, cwd) } @@ -375,16 +385,17 @@ func (d *DockerFileBuilder) buildCachedImages(config *integrationTestConfig, cac "-v", cwd + ":/workspace", "-e", benchmarkEnv} dockerRunFlags = addServiceAccountFlags(dockerRunFlags, serviceAccount) + dockerRunFlags = addSecurityFlags(dockerRunFlags) + dockerRunFlags = append(dockerRunFlags, ExecutorImage, "-f", path.Join(buildContextPath, dockerfilesPath, dockerfile), "-d", kanikoImage, "-c", buildContextPath, cacheFlag, "--cache-repo", cacheRepo, - "--cache-dir", cacheDir) - for _, v := range args { - dockerRunFlags = append(dockerRunFlags, v) - } + "--cache-dir", cacheDir, + ) + dockerRunFlags = append(dockerRunFlags, args...) kanikoCmd := exec.Command("docker", dockerRunFlags...) 
_, err := RunCommandWithoutTest(kanikoCmd) @@ -420,6 +431,7 @@ func (d *DockerFileBuilder) buildRelativePathsImage(imageRepo, dockerfile, servi dockerRunFlags := []string{"run", "--net=host", "-v", cwd + ":/workspace"} dockerRunFlags = addServiceAccountFlags(dockerRunFlags, serviceAccount) + dockerRunFlags = addSecurityFlags(dockerRunFlags) dockerRunFlags = append(dockerRunFlags, ExecutorImage, "-f", dockerfile, "-d", kanikoImage, @@ -481,6 +493,8 @@ func buildKanikoImage( "-v", contextDir + ":/workspace", "-v", benchmarkDir + ":/kaniko/benchmarks", } + dockerRunFlags = addServiceAccountFlags(dockerRunFlags, serviceAccount) + dockerRunFlags = addSecurityFlags(dockerRunFlags) if env, ok := envsMap[dockerfile]; ok { for _, envVariable := range env { @@ -488,8 +502,6 @@ func buildKanikoImage( } } - dockerRunFlags = addServiceAccountFlags(dockerRunFlags, serviceAccount) - kanikoDockerfilePath := path.Join(buildContextPath, dockerfilesPath, dockerfile) if dockerfilesPath == "" { kanikoDockerfilePath = path.Join(buildContextPath, "Dockerfile") diff --git a/integration/integration_test.go b/integration/integration_test.go index 0f4ef28dc8..07d1e47930 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -262,6 +262,7 @@ func testGitBuildcontextHelper(t *testing.T, repo string) { kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_git") dockerRunFlags := []string{"run", "--net=host"} dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount) + dockerRunFlags = addSecurityFlags(dockerRunFlags) dockerRunFlags = append(dockerRunFlags, ExecutorImage, "-f", dockerfile, "-d", kanikoImage, @@ -327,6 +328,7 @@ func TestGitBuildcontextSubPath(t *testing.T) { kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_git") dockerRunFlags := []string{"run", "--net=host"} dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount) + dockerRunFlags = addSecurityFlags(dockerRunFlags) dockerRunFlags 
= append( dockerRunFlags, ExecutorImage, @@ -369,6 +371,7 @@ func TestBuildViaRegistryMirrors(t *testing.T) { kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_registry_mirror") dockerRunFlags := []string{"run", "--net=host"} dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount) + dockerRunFlags = addSecurityFlags(dockerRunFlags) dockerRunFlags = append(dockerRunFlags, ExecutorImage, "-f", dockerfile, "-d", kanikoImage, @@ -410,6 +413,7 @@ func TestKanikoDir(t *testing.T) { kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_registry_mirror") dockerRunFlags := []string{"run", "--net=host"} dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount) + dockerRunFlags = addSecurityFlags(dockerRunFlags) dockerRunFlags = append(dockerRunFlags, ExecutorImage, "-f", dockerfile, "-d", kanikoImage, @@ -452,6 +456,7 @@ func TestBuildWithLabels(t *testing.T) { kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_label:mylabel") dockerRunFlags := []string{"run", "--net=host"} dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount) + dockerRunFlags = addSecurityFlags(dockerRunFlags) dockerRunFlags = append(dockerRunFlags, ExecutorImage, "-f", dockerfile, "-d", kanikoImage, @@ -492,6 +497,7 @@ func TestBuildWithHTTPError(t *testing.T) { kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_add_404") dockerRunFlags := []string{"run", "--net=host"} dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount) + dockerRunFlags = addSecurityFlags(dockerRunFlags) dockerRunFlags = append(dockerRunFlags, ExecutorImage, "-f", dockerfile, "-d", kanikoImage, @@ -668,6 +674,7 @@ func TestExitCodePropagation(t *testing.T) { "-v", contextVolume, } dockerFlags = addServiceAccountFlags(dockerFlags, "") + dockerFlags = addSecurityFlags(dockerFlags) dockerFlags = append(dockerFlags, ExecutorImage, "-c", "dir:///workspace/", "-f", "./Dockerfile_exit_code_propagation", diff 
--git a/integration/integration_with_stdin_test.go b/integration/integration_with_stdin_test.go index fc023c9340..3b9bbbbdcf 100644 --- a/integration/integration_with_stdin_test.go +++ b/integration/integration_with_stdin_test.go @@ -71,7 +71,7 @@ func TestBuildWithStdin(t *testing.T) { gw := gzip.NewWriter(tarFile) defer gw.Close() - tw := util.NewTar(gw) + tw := util.NewTar(testDirLongPath, gw) defer tw.Close() if err := tw.AddFileToTar(dockerfile); err != nil { @@ -103,6 +103,7 @@ func TestBuildWithStdin(t *testing.T) { dockerRunFlags := []string{"run", "--interactive", "--net=host", "-v", cwd + ":/workspace"} dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount) + dockerRunFlags = addSecurityFlags(dockerRunFlags) dockerRunFlags = append(dockerRunFlags, ExecutorImage, "-f", dockerfile, diff --git a/integration/k8s-job.yaml b/integration/k8s-job.yaml index 4ebb628e8a..4405e95217 100644 --- a/integration/k8s-job.yaml +++ b/integration/k8s-job.yaml @@ -4,6 +4,11 @@ metadata: name: kaniko-test-{{.Name}} spec: template: + metadata: + annotations: + # disable seccomp and apparmor for chroot mounts + container.apparmor.security.beta.kubernetes.io/kaniko: unconfined + container.seccomp.security.alpha.kubernetes.io/kaniko: unconfined spec: hostNetwork: true containers: @@ -15,6 +20,10 @@ spec: volumeMounts: - name: context mountPath: /workspace + securityContext: + capabilities: + add: + - SYS_ADMIN restartPolicy: Never volumes: - name: context diff --git a/integration/tar.go b/integration/tar.go index 3bb51cb936..99b163c118 100644 --- a/integration/tar.go +++ b/integration/tar.go @@ -49,7 +49,7 @@ func CreateIntegrationTarball() (string, error) { gzipWriter := gzip.NewWriter(file) defer gzipWriter.Close() - err = util.CreateTarballOfDirectory(dir, file) + err = util.CreateTarballOfDirectory("/", dir, file) if err != nil { return "", fmt.Errorf("creating tarball of integration dir: %w", err) } diff --git a/pkg/buildcontext/tar_test.go 
b/pkg/buildcontext/tar_test.go index 5904b4e603..0ba26efcc4 100644 --- a/pkg/buildcontext/tar_test.go +++ b/pkg/buildcontext/tar_test.go @@ -84,7 +84,7 @@ func TestBuildWithLocalTar(t *testing.T) { gw := gzip.NewWriter(validTarFile) defer gw.Close() - tw := util.NewTar(gw) + tw := util.NewTar(testDirLongPath, gw) defer tw.Close() if err := tw.AddFileToTar(validDockerfile); err != nil { diff --git a/pkg/chroot/capabilities_linux.go b/pkg/chroot/capabilities_linux.go new file mode 100644 index 0000000000..f0f66a9f63 --- /dev/null +++ b/pkg/chroot/capabilities_linux.go @@ -0,0 +1,49 @@ +//go:build linux +// +build linux + +package chroot + +import ( + "fmt" + + "github.com/syndtr/gocapability/capability" +) + +// defaultCapabilities returns a Linux kernel default capabilities +var defaultCapabilities = []capability.Cap{ + capability.CAP_CHOWN, + capability.CAP_DAC_OVERRIDE, + capability.CAP_FSETID, + capability.CAP_FOWNER, + capability.CAP_MKNOD, + capability.CAP_NET_RAW, + capability.CAP_SETGID, + capability.CAP_SETUID, + capability.CAP_SETFCAP, + capability.CAP_SETPCAP, + capability.CAP_NET_BIND_SERVICE, + capability.CAP_KILL, + capability.CAP_AUDIT_WRITE, +} + +// setCapabilities sets capabilities for ourselves, to be more or less inherited by any processes that we'll start. +func setCapabilities() error { + caps, err := capability.NewPid2(0) + if err != nil { + return err + } + capMap := map[capability.CapType][]capability.Cap{ + capability.BOUNDING: defaultCapabilities, + capability.EFFECTIVE: defaultCapabilities, + capability.INHERITABLE: {}, + capability.PERMITTED: defaultCapabilities, + } + for capType, capList := range capMap { + caps.Set(capType, capList...) 
+ } + err = caps.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS) + if err != nil { + return fmt.Errorf("applying capabiliies: %w", err) + } + return nil +} diff --git a/pkg/chroot/capabilities_linux_test.go b/pkg/chroot/capabilities_linux_test.go new file mode 100644 index 0000000000..7d66c3cd39 --- /dev/null +++ b/pkg/chroot/capabilities_linux_test.go @@ -0,0 +1,55 @@ +//go:build linux +// +build linux + +package chroot + +import ( + "os" + "runtime" + "testing" + + "github.com/syndtr/gocapability/capability" +) + +func Test_setCapabilities(t *testing.T) { + test := struct { + name string + wanted map[capability.CapType][]capability.Cap + wantErr bool + }{ + name: "default applied capabilities", + wanted: map[capability.CapType][]capability.Cap{ + capability.BOUNDING: defaultCapabilities, + capability.EFFECTIVE: defaultCapabilities, + capability.INHERITABLE: {}, + capability.PERMITTED: defaultCapabilities, + }, + wantErr: false, + } + if os.Getuid() != 0 { + t.Skip("calling user is not root, so can't load caps") + } + + runtime.LockOSThread() + defer runtime.UnlockOSThread() + if err := setCapabilities(); (err != nil) != test.wantErr { + t.Fatalf("setCapabilities() error = %v, wantErr %v", err, test.wantErr) + } + // load the current caps + caps, err := capability.NewPid2(0) + if err != nil { + t.Fatal(err) + } + err = caps.Load() + if err != nil { + t.Fatal(err) + } + for capType, capList := range test.wanted { + for _, cap := range capList { + if !caps.Get(capType, cap) { + t.Errorf("cap %s on capType %s is not set but wanted", cap, capType) + } + } + } + t.Logf(caps.String()) +} diff --git a/pkg/chroot/chroot_linux.go b/pkg/chroot/chroot_linux.go new file mode 100644 index 0000000000..f547a75a65 --- /dev/null +++ b/pkg/chroot/chroot_linux.go @@ -0,0 +1,403 @@ +//go:build linux +// +build linux + +package chroot + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/fs" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "syscall" + 
"time" + + "github.com/GoogleContainerTools/kaniko/pkg/unshare" + "github.com/docker/docker/pkg/reexec" + mobymount "github.com/moby/sys/mount" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const ( + parentProcess = "chroot-parent-process" + childProcess = "chroot-child-process" + confPipeKey = "kaniko_conf_pipe" +) + +func init() { + reexec.Register(parentProcess, runParentProcessMain) + reexec.Register(childProcess, runChildProcessMain) + // when a reexec main was invoked, exit immediately + if reexec.Init() { + os.Exit(0) + } +} + +// cmd is exec.Cmd without io.Reader and io.Writer fields +// cmd is exec.Cmd without io.Reader and io.Writer fields +type cmd struct { + Path string `json:"path,omitempty"` + Args []string `json:"args,omitempty"` + Env []string `json:"env,omitempty"` + SysAttr *syscall.SysProcAttr `json:"sys_attr,omitempty"` + Dir string `json:"dir,omitempty"` +} + +func execCmdToCmd(execCmd *exec.Cmd) *cmd { + return &cmd{ + Path: execCmd.Path, + Args: execCmd.Args, + Env: execCmd.Env, + SysAttr: execCmd.SysProcAttr, + Dir: execCmd.Dir, + } +} + +func cmdToExecCmd(cmd *cmd) *exec.Cmd { + return &exec.Cmd{ + Path: cmd.Path, + Args: cmd.Args, + Env: cmd.Env, + SysProcAttr: cmd.SysAttr, + Dir: cmd.Dir, + // set std{in,out,err} to os versions because they didn't get marshaled + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +type config struct { + Cmd *cmd `json:"cmd,omitempty"` + NewRoot string `json:"new_root,omitempty"` +} + +// Run will execute the cmd inside a chrooted and newly created namespace environment +func Run(cmd *exec.Cmd, newRoot string) error { + // lockOSThread because changing the thread would kick us out of the namespaces + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Create a pipe for passing configuration down to the next process. 
+ confReader, confWriter, err := os.Pipe() + if err != nil { + return fmt.Errorf("error creating configuration pipes: %w", err) + } + defer confReader.Close() + defer confWriter.Close() + + // marshal config for communication with subprocess + c := config{ + Cmd: execCmdToCmd(cmd), + NewRoot: newRoot, + } + + unshareCmd := unshare.Command(syscall.CLONE_NEWUSER|syscall.CLONE_NEWNS, parentProcess) + unshareCmd.Stderr, unshareCmd.Stdout, unshareCmd.Stdin = os.Stderr, os.Stdout, os.Stdin + if unshareCmd.SysProcAttr == nil { + unshareCmd.SysProcAttr = &syscall.SysProcAttr{} + } + sysProcAttr := unshareCmd.SysProcAttr + sysProcAttr.Pdeathsig = syscall.SIGKILL + + err = copyConfigIntoPipeAndStartChild(unshareCmd, &c, confReader, confWriter) + if err != nil { + err = fmt.Errorf("running unshare cmd: %w", err) + fmt.Fprintf(os.Stderr, "error: %v\n", err) + return err + } + err = unshareCmd.Wait() + if err != nil { + err = fmt.Errorf("error waiting for unshare cmd: %w", err) + fmt.Fprintf(os.Stderr, "error: %v\n", err) + return err + } + return nil +} + +// runParentProcessMain will create all needed mounts, pivot_root and execute the child. +// This is always executed inside an unshared environment. 
+func runParentProcessMain() { + // lockOSThread because changing the thread would kick us out of the namespaces + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + c, err := unmarshalConfigFromPipe() + if err != nil { + fmt.Fprintf(os.Stderr, "error unmarshal config from pipe: %v\n", err) + os.Exit(1) + } + + // TODO: remove debug stuff + logrus.Infof("i am %d", os.Getuid()) + uidmap, err := os.Open("/proc/self/uid_map") + if err != nil { + fmt.Fprintf(os.Stderr, "open uid_map for inspection: %v\n", err) + os.Exit(1) + } + uidmapContent, err := io.ReadAll(uidmap) + if err != nil { + fmt.Fprintf(os.Stderr, "reading uid_map for inspection: %v\n", err) + os.Exit(1) + } + logrus.Infof("uid_map content: %s", uidmapContent) + + // create mounts for pivot_root + undo, err := prepareMounts(c.NewRoot) + if err != nil { + fmt.Fprintf(os.Stderr, "error creating mounts: %v\n", err) + os.Exit(1) + } + + defer func() { + logrus.Debug("undo mounting of chroot isolation") + undoErr := undo() + if undoErr != nil { + fmt.Fprintf(os.Stderr, "error undo mounting: %s\n", undoErr) + os.Exit(1) + } + }() + + err = pivotRoot(c.NewRoot) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } + + // Create a pipe for passing configuration down to the next process. 
+ confReader, confWriter, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "error creating configuration pipe: %v\n", err) + os.Exit(1) + } + defer confWriter.Close() + defer confReader.Close() + + // delay pid namespace until here, because pid would be wrong otherwise + childCmd := unshare.Command(syscall.CLONE_NEWPID, childProcess) + + childCmd.Stderr, childCmd.Stdout, childCmd.Stdin = os.Stderr, os.Stdout, os.Stdin + if childCmd.SysProcAttr == nil { + childCmd.SysProcAttr = &syscall.SysProcAttr{} + } + sysProcAttr := childCmd.SysProcAttr + sysProcAttr.Pdeathsig = syscall.SIGKILL + + err = copyConfigIntoPipeAndStartChild(childCmd, &c, confReader, confWriter) + if err != nil { + fmt.Fprintf(os.Stderr, "error running child: %v\n", err) + os.Exit(1) + } + err = childCmd.Wait() + if err != nil { + fmt.Fprintf(os.Stderr, "error waiting for child: %v\n", err) + os.Exit(1) + } + +} + +// runChildProcess will set capabilities and execute the initial cmd +// TODO: add apparmor and seccomp profiles +func runChildProcessMain() { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + c, err := unmarshalConfigFromPipe() + if err != nil { + fmt.Fprintf(os.Stderr, "error unmarshal config from pipe: %v\n", err) + os.Exit(1) + } + + err = setCapabilities() + if err != nil { + fmt.Fprintf(os.Stderr, "error setting capabilities: %v\n", err) + os.Exit(1) + } + cmd := cmdToExecCmd(c.Cmd) + err = cmd.Run() + if err != nil { + fmt.Fprintf(os.Stderr, "error running original command: %v\n", err) + os.Exit(1) + } +} + +// copyConfigIntoPipeAndStartChild will marshal the config into a pipe which will be passed to child. +// After that, the child will start, but not wait for it. 
+func copyConfigIntoPipeAndStartChild(child *unshare.Cmd, conf *config, confReader, confWriter *os.File) error { + // marshal config for communication with subprocess + confData, err := json.Marshal(conf) + if err != nil { + return fmt.Errorf("marshaling configuration: %w", err) + } + + child.Env = append(child.Env, fmt.Sprintf("%s=%d", confPipeKey, len(child.ExtraFiles)+3)) + child.ExtraFiles = append(child.ExtraFiles, confReader) + + err = child.Start() + if err != nil { + return fmt.Errorf("starting child process: %w", err) + } + _, err = io.Copy(confWriter, bytes.NewReader(confData)) + if err != nil { + return fmt.Errorf("copy configuration to pipe: %w", err) + } + return nil +} + +func unmarshalConfigFromPipe() (config, error) { + fdStr := os.Getenv(confPipeKey) + if fdStr == "" { + return config{}, fmt.Errorf("%v is not set, can't create pipe", confPipeKey) + } + fd, err := strconv.Atoi(fdStr) + if err != nil { + return config{}, fmt.Errorf("converting %v to integer: %w", fdStr, err) + } + confPipe := os.NewFile(uintptr(fd), confPipeKey) + defer confPipe.Close() + var c config + err = json.NewDecoder(confPipe).Decode(&c) + if err != nil { + return c, fmt.Errorf("decoding cmd config: %v", err) + } + return c, nil +} + +func pivotRoot(newRoot string) error { + err := unix.Chdir(newRoot) + if err != nil { + return fmt.Errorf("chdir to newRoot: %w", err) + } + err = unix.PivotRoot(newRoot, newRoot) + if err != nil { + return fmt.Errorf("syscall pivot_root: %w", err) + } + err = unmount(".") + if err != nil { + return fmt.Errorf("unmounting newRoot after pivot_root: %w", err) + } + return nil +} + +func prepareMounts(newRoot string, additionalMounts ...string) (undoMount func() error, err error) { + bindFlags := uintptr(unix.MS_BIND | unix.MS_REC | unix.MS_PRIVATE) + devFlags := bindFlags | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY + sysFlags := devFlags | unix.MS_NODEV + procFlags := devFlags | unix.MS_NODEV + type mountOpts struct { + flags uintptr + 
mountType string + } + mounts := map[string]mountOpts{ + "/etc/resolv.conf": {flags: unix.MS_RDONLY | bindFlags}, + "/etc/hostname": {flags: unix.MS_RDONLY | bindFlags}, + "/etc/hosts": {flags: unix.MS_RDONLY | bindFlags}, + "/dev": {flags: devFlags}, + "/sys": {flags: sysFlags}, + "/proc": {flags: procFlags}, + } + for _, add := range additionalMounts { + mounts[add] = mountOpts{flags: bindFlags} + } + // Create a new mount namespace in which to do the things we're doing. + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + return nil, fmt.Errorf("error creating new mount namespace for %v: %w", newRoot, err) + } + // before mounting, make sure to make all mounts private + err = mobymount.MakeRPrivate("/") + if err != nil { + return nil, fmt.Errorf("marking mounts on / private: %w", err) + } + for src, opts := range mounts { + srcinfo, err := os.Lstat(src) + if err != nil { + return nil, fmt.Errorf("src %v for mount doesn't exist: %w", src, err) + } + dest := filepath.Join(newRoot, src) + err = createDest(srcinfo, dest) + if err != nil { + return nil, fmt.Errorf("creating dest %v: %w", dest, err) + } + err = mount(src, dest, opts.mountType, opts.flags) + if err != nil { + return nil, err + } + } + // self mount newRoot for pivot_root + // unmount will happen after pivot_root is called + err = mount(newRoot, newRoot, "", bindFlags) + if err != nil { + return nil, err + } + + undoMount = func() error { + for src := range mounts { + logrus.Debugf("unmounting %v", src) + err := unmount(src) + if err != nil { + return err + } + } + return nil + } + return undoMount, nil +} + +func unmount(dest string) error { + // perform lazy detaching if bind mount + err := unix.Unmount(dest, unix.MNT_DETACH) + if err != nil { + retries := 0 + for (err == unix.EBUSY || err == unix.EAGAIN) && retries < 50 { + time.Sleep(50 * time.Millisecond) + err = unix.Unmount(dest, unix.MNT_DETACH) + retries++ + } + if err != nil { + return fmt.Errorf("unmounting %q (retried %d times): %v", 
dest, retries, err) + } + } + return nil +} + +func mount(src, dest, mountType string, flags uintptr) error { + logrus.Debugf("mounting %v to %v", src, dest) + err := unix.Mount(src, dest, mountType, uintptr(flags), "") + if err != nil { + return fmt.Errorf("mounting %v to %v: %w", src, dest, err) + } + return nil +} + +func createDest(srcinfo fs.FileInfo, dest string) error { + // Check if target is a symlink + _, err := os.Lstat(dest) + if err != nil { + // If the target can't be stat()ted, check the error. + if !os.IsNotExist(err) { + return fmt.Errorf("error examining %q for mounting: %w", dest, err) + } + // The target isn't there yet, so create it. + if srcinfo.IsDir() { + if err = os.MkdirAll(dest, 0755); err != nil { + return fmt.Errorf("error creating mountpoint %q in mount namespace: %w", dest, err) + } + } else { + if err = os.MkdirAll(filepath.Dir(dest), 0755); err != nil { + return fmt.Errorf("error ensuring parent of mountpoint %q (%q) is present in new root: %w", dest, filepath.Dir(dest), err) + } + var file *os.File + if file, err = os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, 0755); err != nil { + return fmt.Errorf("error creating mountpoint %q: %w", dest, err) + } + file.Close() + } + } + return nil +} diff --git a/pkg/chroot/chroot_linux_test.go b/pkg/chroot/chroot_linux_test.go new file mode 100644 index 0000000000..142f5ce52c --- /dev/null +++ b/pkg/chroot/chroot_linux_test.go @@ -0,0 +1,88 @@ +//go:build linux +// +build linux + +package chroot + +import ( + "bytes" + "io" + "os/exec" + "path/filepath" + "testing" +) + +func TestRun(t *testing.T) { + tempDir := t.TempDir() + stdin, stdout, stderr := new(bytes.Buffer), new(bytes.Buffer), new(bytes.Buffer) + type args struct { + cmd *exec.Cmd + newRoot string + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "simple ls -al", + args: args{ + cmd: &exec.Cmd{ + Path: "/bin/ls", + Args: []string{"ls", "-al"}, + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + }, + 
newRoot: tempDir, + }, + wantErr: false, + }, + { + name: "mount syscall should be denied", + args: args{ + cmd: &exec.Cmd{ + Path: "/bin/mount", + Args: []string{"mount", "--bind", "/tmp", "/bin"}, + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + }, + newRoot: tempDir, + }, + wantErr: true, + }, + } + + t.Logf("setup %v", tempDir) + err := setupNewRoot(tempDir) + if err != nil { + t.Fatal(err) + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.args.newRoot = tempDir + if err = Run(tt.args.cmd, tt.args.newRoot); (err != nil) != tt.wantErr { + output, errRead := io.ReadAll(stderr) + if errRead != nil { + t.Fatalf("can't read stderr: %v", errRead) + } + t.Logf("stderr output: %s\n", output) + t.Fatalf("Run() error = %v, wantErr %v", err, tt.wantErr) + } + + output, err := io.ReadAll(stderr) + if err != nil { + t.Fatalf("can't read stderr: %v", err) + } + t.Logf("stdout output: %s\n", output) + }) + } +} + +// setupNewRoot will unTar a debian:bullseye-slim filesystem on newRoot +func setupNewRoot(newRoot string) error { + debianTar := filepath.Join("testdata", "debian.tar") + // use unix tar because our tar implementation restores file permissions and needs root + cmd := exec.Command("tar", "xf", debianTar, "-C", newRoot) + return cmd.Run() +} diff --git a/pkg/chroot/homedir.go b/pkg/chroot/homedir.go new file mode 100644 index 0000000000..f1875b04e6 --- /dev/null +++ b/pkg/chroot/homedir.go @@ -0,0 +1,23 @@ +package chroot + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/google/uuid" +) + +func TmpDirInHome() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("getting homeDir: %w", err) + } + id := uuid.New() + tmpDir := filepath.Join(home, id.String()) + err = os.Mkdir(tmpDir, 0755) + if err != nil { + return "", err + } + return tmpDir, nil +} diff --git a/pkg/chroot/testdata/debian.tar b/pkg/chroot/testdata/debian.tar new file mode 100644 index 0000000000..fae48e31a7 Binary files 
/dev/null and b/pkg/chroot/testdata/debian.tar differ diff --git a/pkg/chroot/user/user.go b/pkg/chroot/user/user.go new file mode 100644 index 0000000000..e763c1b4d1 --- /dev/null +++ b/pkg/chroot/user/user.go @@ -0,0 +1,90 @@ +package chrootuser + +import ( + "fmt" + "os/user" + "strings" +) + +type lookupPasswdEntry struct { + name string + uid uint64 + gid uint64 + home string +} +type lookupGroupEntry struct { + name string + gid uint64 + user string +} + +// GetUser will return the uid, gid of the user specified in the userStr +// it will use the /etc/passwd and /etc/group files inside of the rootdir +// to return this information. +// userStr format [user | user:group | uid | uid:gid | user:gid | uid:group ] +func GetUser(rootdir string, userStr string) (*user.User, error) { + spec := strings.SplitN(userStr, ":", 2) + userStr = spec[0] + groupspec := "" + + if userStr == "" { + userStr = "0" + } + + if len(spec) > 1 { + groupspec = spec[1] + } + + userEntry, err := lookupUserInContainer(rootdir, userStr) + if err != nil { + return nil, err + } + + var groupEntry *lookupGroupEntry + if groupspec != "" { + groupEntry, err = lookupGroupInContainer(rootdir, groupspec) + if err != nil { + return nil, err + } + } + + homedir, err := lookupHomedirInContainer(rootdir, userEntry.uid) + if err != nil { + homedir = "/" + } + user := &user.User{ + Uid: fmt.Sprint(userEntry.uid), + Gid: fmt.Sprint(userEntry.gid), + HomeDir: homedir, + Username: userStr, + } + if groupEntry != nil { + user.Gid = fmt.Sprint(groupEntry.gid) + } + return user, nil +} + +func GetAdditionalGroupIDs(rootdir string, user *user.User) ([]string, error) { + gids, err := lookupAdditionalGroupsForUser(rootdir, user) + if err != nil { + return nil, err + } + gidsStr := make([]string, 0, len(gids)) + for _, gid := range gids { + gidsStr = append(gidsStr, fmt.Sprint(gid)) + } + return gidsStr, nil +} + +// GetGroup returns the gid by looking it up in the /etc/group file +// groupspec format [ group | gid ]
+func GetGroup(rootdir, groupspec string) (*user.Group, error) { + group, err := lookupGroupInContainer(rootdir, groupspec) + if err != nil { + return nil, err + } + return &user.Group{ + Gid: fmt.Sprint(group.gid), + Name: group.name, + }, nil +} diff --git a/pkg/chroot/user/user_linux.go b/pkg/chroot/user/user_linux.go new file mode 100644 index 0000000000..f59f9ffbbb --- /dev/null +++ b/pkg/chroot/user/user_linux.go @@ -0,0 +1,168 @@ +//go:build linux +// +build linux + +package chrootuser + +import ( + "bufio" + "fmt" + "io" + "os" + "os/user" + "path/filepath" + "strconv" + "strings" + "sync" +) + +var ( + lookupUser, lookupGroup sync.Mutex + // override for testing + openChrootedFileFunc = openChrootedFile +) + +func parseNextPasswd(rc *bufio.Scanner) *lookupPasswdEntry { + if !rc.Scan() { + return nil + } + line := rc.Text() + fields := strings.Split(line, ":") + if len(fields) != 7 { + return nil + } + uid, err := strconv.ParseUint(fields[2], 10, 32) + if err != nil { + return nil + } + gid, err := strconv.ParseUint(fields[3], 10, 32) + if err != nil { + return nil + } + return &lookupPasswdEntry{ + name: fields[0], + uid: uid, + gid: gid, + home: fields[5], + } +} + +func parseNextGroup(rc *bufio.Scanner) *lookupGroupEntry { + if !rc.Scan() { + return nil + } + line := rc.Text() + fields := strings.Split(line, ":") + if len(fields) != 4 { + return nil + } + gid, err := strconv.ParseUint(fields[2], 10, 32) + if err != nil { + return nil + } + return &lookupGroupEntry{ + name: fields[0], + gid: gid, + user: fields[3], + } +} + +func lookupUserInContainer(rootdir, userStr string) (*lookupPasswdEntry, error) { + r, err := openChrootedFileFunc(rootdir, "/etc/passwd") + if err != nil { + return nil, err + } + rc := bufio.NewScanner(r) + defer r.Close() + + lookupUser.Lock() + defer lookupUser.Unlock() + + pwd := parseNextPasswd(rc) + for pwd != nil { + // check name and uid match + if pwd.name != userStr { + if fmt.Sprint(pwd.uid) != userStr { + { + pwd = 
parseNextPasswd(rc) + continue + } + } + } + return pwd, nil + } + + return nil, user.UnknownUserError(userStr) +} + +func lookupGroupInContainer(rootdir, groupname string) (*lookupGroupEntry, error) { + r, err := openChrootedFileFunc(rootdir, "/etc/group") + if err != nil { + return nil, err + } + rc := bufio.NewScanner(r) + defer r.Close() + + lookupGroup.Lock() + defer lookupGroup.Unlock() + + grp := parseNextGroup(rc) + for grp != nil { + if grp.name != groupname { + grp = parseNextGroup(rc) + continue + } + return grp, nil + } + + return nil, user.UnknownGroupError(groupname) +} + +func lookupAdditionalGroupsForUser(rootdir string, user *user.User) ([]uint32, error) { + r, err := openChrootedFileFunc(rootdir, "/etc/group") + if err != nil { + return nil, err + } + rc := bufio.NewScanner(r) + defer r.Close() + + lookupGroup.Lock() + defer lookupGroup.Unlock() + + gids := []uint32{} + grp := parseNextGroup(rc) + for grp != nil { + if strings.Contains(grp.user, user.Username) || strings.Contains(grp.user, user.Uid) { + gids = append(gids, uint32(grp.gid)) + } + grp = parseNextGroup(rc) + } + return gids, nil +} + +func lookupHomedirInContainer(rootdir string, uid uint64) (string, error) { + r, err := openChrootedFileFunc(rootdir, "/etc/passwd") + if err != nil { + return "", err + } + rc := bufio.NewScanner(r) + defer r.Close() + + lookupUser.Lock() + defer lookupUser.Unlock() + + pwd := parseNextPasswd(rc) + for pwd != nil { + if pwd.uid != uid { + pwd = parseNextPasswd(rc) + continue + } + return pwd.home, nil + } + + return "", user.UnknownUserError(fmt.Sprint(uid)) +} + +func openChrootedFile(rootDir string, file string) (io.ReadCloser, error) { + absFile := filepath.Join(rootDir, file) + return os.OpenFile(absFile, os.O_RDONLY, 0) +} diff --git a/pkg/chroot/user/user_linux_test.go b/pkg/chroot/user/user_linux_test.go new file mode 100644 index 0000000000..6f90d0f23d --- /dev/null +++ b/pkg/chroot/user/user_linux_test.go @@ -0,0 +1,282 @@ +//go:build linux 
+// +build linux + +package chrootuser + +import ( + "bufio" + "bytes" + "io" + "os/user" + "reflect" + "testing" + + "github.com/GoogleContainerTools/kaniko/testutil" +) + +func Test_parseNextPasswd(t *testing.T) { + tests := []struct { + name string + reader io.Reader + want *lookupPasswdEntry + }{ + { + name: "existing user", + want: &lookupPasswdEntry{ + name: "testuser", + uid: 1000, + gid: 1000, + home: "/home/test", + }, + reader: bytes.NewReader([]byte(passwd)), + }, + { + name: "malformed passwd", + want: nil, + reader: bytes.NewReader([]byte(malformedPasswd)), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rc := bufio.NewScanner(tt.reader) + got := parseNextPasswd(rc) + if !reflect.DeepEqual(tt.want, got) { + t.Errorf("wanted %#v, but got %#v", tt.want, got) + } + }) + } +} + +func Test_parseNextGroup(t *testing.T) { + tests := []struct { + name string + want *lookupGroupEntry + reader io.Reader + }{ + { + name: "test group", + want: &lookupGroupEntry{ + name: "bar", + gid: 2001, + user: "testuser,foo", + }, + reader: bytes.NewReader([]byte(group)), + }, + { + name: "malformed gid", + want: nil, + reader: bytes.NewReader([]byte(malformedGroups)), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rc := bufio.NewScanner(tt.reader) + got := parseNextGroup(rc) + if !reflect.DeepEqual(tt.want, got) { + t.Errorf("wanted %#v, but got %#v", tt.want, got) + } + }) + } +} + +func Test_lookupUserInContainer(t *testing.T) { + type args struct { + userStr string + } + tests := []struct { + name string + args args + wantUser *lookupPasswdEntry + wantErr bool + }{ + { + name: "existing user", + args: args{ + userStr: "foo", + }, + wantUser: &lookupPasswdEntry{ + uid: 2000, + gid: 2000, + name: "foo", + home: "/home/foo", + }, + wantErr: false, + }, + { + name: "non existing user", + args: args{ + userStr: "baz", + }, + wantErr: true, + }, + } + original := openChrootedFileFunc + openChrootedFileFunc = openPasswd 
+ defer func() { + openChrootedFileFunc = original + }() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotUser, err := lookupUserInContainer("", tt.args.userStr) + testutil.CheckErrorAndDeepEqual(t, tt.wantErr, err, tt.wantUser, gotUser) + }) + } +} + +func Test_lookupGroupInContainer(t *testing.T) { + type args struct { + groupname string + } + tests := []struct { + name string + args args + wantGroupEntry *lookupGroupEntry + wantErr bool + }{ + { + name: "existing group", + args: args{ + groupname: "foo", + }, + wantGroupEntry: &lookupGroupEntry{ + name: "foo", + gid: 2000, + user: "1000", + }, + wantErr: false, + }, + { + name: "non existing group", + args: args{ + groupname: "no group", + }, + wantErr: true, + }, + } + original := openChrootedFileFunc + openChrootedFileFunc = openGroup + defer func() { + openChrootedFileFunc = original + }() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotGroupEntry, err := lookupGroupInContainer("", tt.args.groupname) + testutil.CheckErrorAndDeepEqual(t, tt.wantErr, err, gotGroupEntry, tt.wantGroupEntry) + }) + } +} + +func Test_lookupHomedirInContainer(t *testing.T) { + type args struct { + uid uint64 + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "existing user", + args: args{ + uid: 1000, + }, + want: "/home/test", + wantErr: false, + }, + { + name: "non existing user", + args: args{ + uid: 0, + }, + want: "", + wantErr: true, + }, + } + original := openChrootedFileFunc + openChrootedFileFunc = openPasswd + defer func() { + openChrootedFileFunc = original + }() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := lookupHomedirInContainer("", tt.args.uid) + testutil.CheckErrorAndDeepEqual(t, tt.wantErr, err, got, tt.want) + }) + } +} + +func Test_lookupAdditionalGroupsForUser(t *testing.T) { + type args struct { + user *user.User + } + tests := []struct { + name string + args args + wantGids []uint32 
+ wantErr bool + }{ + { + name: "user with uid and name in groups", + args: args{ + user: &user.User{ + Uid: "1000", + Username: "testuser", + }, + }, + wantGids: []uint32{2001, 2000}, + wantErr: false, + }, + { + name: "user with no additional groups", + args: args{ + user: &user.User{ + Uid: "2001", + Username: "bar", + }, + }, + wantGids: []uint32{}, + wantErr: false, + }, + } + original := openChrootedFileFunc + openChrootedFileFunc = openGroup + defer func() { + openChrootedFileFunc = original + }() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotGids, err := lookupAdditionalGroupsForUser("", tt.args.user) + if (err != nil) != tt.wantErr { + t.Errorf("lookupAdditionalGroupsForUser() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotGids, tt.wantGids) { + t.Errorf("lookupAdditionalGroupsForUser() = %v, want %v", gotGids, tt.wantGids) + } + }) + } +} + +var passwd = `testuser:x:1000:1000:I am test:/home/test:/bin/zsh +foo:x:2000:2000:I am foo:/home/foo:/bin/zsh +bar:x:2001:2001:I am bar:/home/bar:/bin/zsh +` +var malformedPasswd = `bar:x:awdjawdj:foo` + +func openPasswd(rootDir string, file string) (io.ReadCloser, error) { + r := bytes.NewReader([]byte(passwd)) + return io.NopCloser(r), nil +} + +var group = `bar:x:2001:testuser,foo +foo:x:2000:1000 +test:x:1000: +` + +var malformedGroups = `bar:x:awdjawdj:foo` + +func openGroup(rootDir string, file string) (io.ReadCloser, error) { + r := bytes.NewReader([]byte(group)) + return io.NopCloser(r), nil +} diff --git a/pkg/chroot/user/user_undefined.go b/pkg/chroot/user/user_undefined.go new file mode 100644 index 0000000000..d3ef3a188f --- /dev/null +++ b/pkg/chroot/user/user_undefined.go @@ -0,0 +1,25 @@ +//go:build !linux +// +build !linux + +package chrootuser + +import ( + "errors" + "os/user" +) + +func lookupUserInContainer(rootdir, username string) (*lookupPasswdEntry, error) { + return nil, errors.New("lookupUserInContainer is only available on linux") 
+} + +func lookupGroupInContainer(rootdir, groupname string) (*lookupGroupEntry, error) { + return nil, errors.New("lookupGroupInContainer is only available on linux") +} + +func lookupHomedirInContainer(rootdir string, uid uint64) (string, error) { + return "", errors.New("lookupHomedirInContainer is only available on linux") +} + +func lookupAdditionalGroupsForUser(rootdir string, user *user.User) (gids []uint32, err error) { + return nil, errors.New("lookupAdditionalGroupsForUser is only available on linux") +} diff --git a/pkg/commands/add.go b/pkg/commands/add.go index ae86f35380..5107229647 100644 --- a/pkg/commands/add.go +++ b/pkg/commands/add.go @@ -27,6 +27,8 @@ import ( "github.com/GoogleContainerTools/kaniko/pkg/util" "github.com/sirupsen/logrus" + + "strings" ) type AddCommand struct { @@ -34,6 +36,7 @@ type AddCommand struct { cmd *instructions.AddCommand fileContext util.FileContext snapshotFiles []string + rootDir string } // ExecuteCommand executes the ADD command @@ -47,7 +50,7 @@ type AddCommand struct { func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { replacementEnvs := buildArgs.ReplacementEnvs(config.Env) - uid, gid, err := util.GetUserGroup(a.cmd.Chown, replacementEnvs) + uid, gid, err := util.GetUserGroup(a.rootDir, a.cmd.Chown, replacementEnvs) if err != nil { return errors.Wrap(err, "getting user group from chown") } @@ -57,6 +60,11 @@ func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui return err } + // prepend rootDir in case we are chrooting + if !strings.HasPrefix(dest, a.rootDir) { + dest = filepath.Join(a.rootDir, dest) + } + var unresolvedSrcs []string // If any of the sources are local tar archives: // 1. 
Unpack them to the specified destination @@ -102,6 +110,7 @@ func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui Chown: a.cmd.Chown, }, fileContext: a.fileContext, + rootDir: a.rootDir, } if err := copyCmd.ExecuteCommand(config, buildArgs); err != nil { diff --git a/pkg/commands/commands.go b/pkg/commands/commands.go index 2683c98f56..dba7854d53 100644 --- a/pkg/commands/commands.go +++ b/pkg/commands/commands.go @@ -18,6 +18,7 @@ package commands import ( "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/isolation" "github.com/GoogleContainerTools/kaniko/pkg/util" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/moby/buildkit/frontend/dockerfile/instructions" @@ -60,15 +61,15 @@ type DockerCommand interface { ShouldDetectDeletedFiles() bool } -func GetCommand(cmd instructions.Command, fileContext util.FileContext, useNewRun bool, cacheCopy bool) (DockerCommand, error) { +func GetCommand(cmd instructions.Command, fileContext util.FileContext, useNewRun bool, cacheCopy bool, rootDir string, isolator isolation.Isolator) (DockerCommand, error) { switch c := cmd.(type) { case *instructions.RunCommand: if useNewRun { - return &RunMarkerCommand{cmd: c}, nil + return &RunMarkerCommand{cmd: c, rootDir: rootDir, isolator: isolator}, nil } - return &RunCommand{cmd: c}, nil + return &RunCommand{cmd: c, rootDir: rootDir, isolator: isolator}, nil case *instructions.CopyCommand: - return &CopyCommand{cmd: c, fileContext: fileContext, shdCache: cacheCopy}, nil + return &CopyCommand{cmd: c, fileContext: fileContext, shdCache: cacheCopy, rootDir: rootDir}, nil case *instructions.ExposeCommand: return &ExposeCommand{cmd: c}, nil case *instructions.EnvCommand: @@ -76,7 +77,7 @@ func GetCommand(cmd instructions.Command, fileContext util.FileContext, useNewRu case *instructions.WorkdirCommand: return &WorkdirCommand{cmd: c}, nil case *instructions.AddCommand: - return &AddCommand{cmd: c, 
fileContext: fileContext}, nil + return &AddCommand{cmd: c, fileContext: fileContext, rootDir: rootDir}, nil case *instructions.CmdCommand: return &CmdCommand{cmd: c}, nil case *instructions.EntrypointCommand: diff --git a/pkg/commands/copy.go b/pkg/commands/copy.go index c85b0f89e8..9d60db9577 100644 --- a/pkg/commands/copy.go +++ b/pkg/commands/copy.go @@ -43,16 +43,17 @@ type CopyCommand struct { fileContext util.FileContext snapshotFiles []string shdCache bool + rootDir string } func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { // Resolve from if c.cmd.From != "" { - c.fileContext = util.FileContext{Root: filepath.Join(kConfig.KanikoDir, c.cmd.From)} + c.fileContext = util.FileContext{Root: filepath.Join(kConfig.KanikoDependencyDir, c.cmd.From)} } replacementEnvs := buildArgs.ReplacementEnvs(config.Env) - uid, gid, err := getUserGroup(c.cmd.Chown, replacementEnvs) + uid, gid, err := getUserGroup(c.rootDir, c.cmd.Chown, replacementEnvs) logrus.Debugf("found uid %v and gid %v for chown string %v", uid, gid, c.cmd.Chown) if err != nil { return errors.Wrap(err, "getting user group from chown") @@ -76,7 +77,7 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu } cwd := config.WorkingDir if cwd == "" { - cwd = kConfig.RootDir + cwd = c.rootDir } destPath, err := util.DestinationFilepath(fullPath, dest, cwd) @@ -91,6 +92,11 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu return errors.Wrap(err, "resolving dest symlink") } + // prepend rootDir in case we are chrooting + if !strings.HasPrefix(destPath, c.rootDir) { + destPath = filepath.Join(c.rootDir, destPath) + } + if fi.IsDir() { copiedFiles, err := util.CopyDir(fullPath, destPath, c.fileContext, uid, gid) if err != nil { @@ -170,6 +176,7 @@ type CachingCopyCommand struct { cmd *instructions.CopyCommand fileContext util.FileContext extractFn util.ExtractFunction + rootDir string } func (cr 
*CachingCopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { @@ -190,9 +197,9 @@ func (cr *CachingCopyCommand) ExecuteCommand(config *v1.Config, buildArgs *docke } cr.layer = layers[0] - cr.extractedFiles, err = util.GetFSFromLayers(kConfig.RootDir, layers, util.ExtractFunc(cr.extractFn), util.IncludeWhiteout()) + cr.extractedFiles, err = util.GetFSFromLayers(cr.rootDir, layers, util.ExtractFunc(cr.extractFn), util.IncludeWhiteout()) - logrus.Debugf("ExtractedFiles: %s", cr.extractedFiles) + logrus.Tracef("ExtractedFiles: %s", cr.extractedFiles) if err != nil { return errors.Wrap(err, "extracting fs from image") } diff --git a/pkg/commands/copy_test.go b/pkg/commands/copy_test.go index f80ac1d845..fe83d9af2b 100755 --- a/pkg/commands/copy_test.go +++ b/pkg/commands/copy_test.go @@ -145,6 +145,7 @@ func Test_CachingCopyCommand_ExecuteCommand(t *testing.T) { "foo.txt", "foo.txt", }, }, + rootDir: tempDir, } count := 0 tc := testCase{ @@ -152,7 +153,7 @@ func Test_CachingCopyCommand_ExecuteCommand(t *testing.T) { count: &count, expectedCount: 1, expectLayer: true, - extractedFiles: []string{"/foo.txt"}, + extractedFiles: []string{filepath.Join(tempDir, "foo.txt")}, contextFiles: []string{"foo.txt"}, } c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error { @@ -163,7 +164,9 @@ func Test_CachingCopyCommand_ExecuteCommand(t *testing.T) { return tc }(), func() testCase { - c := &CachingCopyCommand{} + c := &CachingCopyCommand{ + rootDir: tempDir, + } tc := testCase{ desctiption: "with no image", expectErr: true, @@ -176,7 +179,8 @@ func Test_CachingCopyCommand_ExecuteCommand(t *testing.T) { }(), func() testCase { c := &CachingCopyCommand{ - img: fakeImage{}, + img: fakeImage{}, + rootDir: tempDir, } c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error { return nil @@ -194,6 +198,7 @@ func Test_CachingCopyCommand_ExecuteCommand(t *testing.T) { fakeLayer{}, }, }, + rootDir: tempDir, } c.extractFn = func(_ string, _ 
*tar.Header, _ io.Reader) error { return nil @@ -843,7 +848,7 @@ func TestCopyCommand_ExecuteCommand_Extended(t *testing.T) { uid := os.Getuid() gid := os.Getgid() - getUserGroup = func(userStr string, _ []string) (int64, int64, error) { + getUserGroup = func(rootDir, userStr string, _ []string) (int64, int64, error) { return int64(uid), int64(gid), nil } @@ -888,7 +893,7 @@ func TestCopyCommand_ExecuteCommand_Extended(t *testing.T) { original := getUserGroup defer func() { getUserGroup = original }() - getUserGroup = func(userStr string, _ []string) (int64, int64, error) { + getUserGroup = func(rootDir, userStr string, _ []string) (int64, int64, error) { return 12345, 12345, nil } diff --git a/pkg/commands/run.go b/pkg/commands/run.go index 3290ca0192..96e146ba0a 100644 --- a/pkg/commands/run.go +++ b/pkg/commands/run.go @@ -23,9 +23,9 @@ import ( "strings" "syscall" - kConfig "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/constants" "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/isolation" "github.com/GoogleContainerTools/kaniko/pkg/util" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/moby/buildkit/frontend/dockerfile/instructions" @@ -35,7 +35,9 @@ import ( type RunCommand struct { BaseCommand - cmd *instructions.RunCommand + cmd *instructions.RunCommand + isolator isolation.Isolator + rootDir string } // for testing @@ -44,10 +46,10 @@ var ( ) func (r *RunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { - return runCommandInExec(config, buildArgs, r.cmd) + return runCommandInExec(config, buildArgs, r.cmd, r.rootDir, r.isolator) } -func runCommandInExec(config *v1.Config, buildArgs *dockerfile.BuildArgs, cmdRun *instructions.RunCommand) error { +func runCommandInExec(config *v1.Config, buildArgs *dockerfile.BuildArgs, cmdRun *instructions.RunCommand, rootDir string, isolator isolation.Isolator) error { var 
newCommand []string if cmdRun.PrependShell { // This is the default shell on Linux @@ -98,13 +100,15 @@ func runCommandInExec(config *v1.Config, buildArgs *dockerfile.BuildArgs, cmdRun // If specified, run the command as a specific user if userStr != "" { - cmd.SysProcAttr.Credential, err = util.SyscallCredentials(userStr) + creds, err := util.SyscallCredentials(rootDir, userStr) if err != nil { - return errors.Wrap(err, "credentials") + return fmt.Errorf("get syscallCredentials from userStr %v: %w", userStr, err) } + logrus.Debugf("syscallCredentials for %v: %#v", userStr, creds) + cmd.SysProcAttr.Credential = creds } - env, err := addDefaultHOME(userStr, replacementEnvs) + env, err := addDefaultHOME(rootDir, userStr, replacementEnvs) if err != nil { return errors.Wrap(err, "adding default HOME variable") } @@ -112,27 +116,11 @@ func runCommandInExec(config *v1.Config, buildArgs *dockerfile.BuildArgs, cmdRun cmd.Env = env logrus.Infof("Running: %s", cmd.Args) - if err := cmd.Start(); err != nil { - return errors.Wrap(err, "starting command") - } - - pgid, err := syscall.Getpgid(cmd.Process.Pid) - if err != nil { - return errors.Wrap(err, "getting group id for process") - } - if err := cmd.Wait(); err != nil { - return errors.Wrap(err, "waiting for process to exit") - } - - //it's not an error if there are no grandchildren - if err := syscall.Kill(-pgid, syscall.SIGKILL); err != nil && err.Error() != "no such process" { - return err - } - return nil + return isolator.ExecRunCommand(cmd) } // addDefaultHOME adds the default value for HOME if it isn't already set -func addDefaultHOME(u string, envs []string) ([]string, error) { +func addDefaultHOME(rootDir string, u string, envs []string) ([]string, error) { for _, env := range envs { split := strings.SplitN(env, "=", 2) if split[0] == constants.HOME { @@ -147,7 +135,7 @@ func addDefaultHOME(u string, envs []string) ([]string, error) { // If user is set to username, set value of HOME to /home/${user} // Otherwise the 
user is set to uid and HOME is / - userObj, err := userLookup(u) + userObj, err := userLookup(rootDir, u) if err != nil { return nil, fmt.Errorf("lookup user %v: %w", u, err) } @@ -175,6 +163,7 @@ func (r *RunCommand) CacheCommand(img v1.Image) DockerCommand { img: img, cmd: r.cmd, extractFn: util.ExtractFile, + rootDir: r.rootDir, } } @@ -197,6 +186,7 @@ type CachingRunCommand struct { extractedFiles []string cmd *instructions.RunCommand extractFn util.ExtractFunction + rootDir string } func (cr *CachingRunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { @@ -219,7 +209,7 @@ func (cr *CachingRunCommand) ExecuteCommand(config *v1.Config, buildArgs *docker cr.layer = layers[0] cr.extractedFiles, err = util.GetFSFromLayers( - kConfig.RootDir, + cr.rootDir, layers, util.ExtractFunc(cr.extractFn), util.IncludeWhiteout(), diff --git a/pkg/commands/run_marker.go b/pkg/commands/run_marker.go index bc7482f088..bcee834282 100644 --- a/pkg/commands/run_marker.go +++ b/pkg/commands/run_marker.go @@ -20,6 +20,7 @@ import ( "os" "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/isolation" "github.com/GoogleContainerTools/kaniko/pkg/util" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/moby/buildkit/frontend/dockerfile/instructions" @@ -28,15 +29,17 @@ import ( type RunMarkerCommand struct { BaseCommand - cmd *instructions.RunCommand - Files []string + cmd *instructions.RunCommand + Files []string + rootDir string + isolator isolation.Isolator } func (r *RunMarkerCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { // run command `touch filemarker` logrus.Debugf("Using new RunMarker command") prevFilesMap, _ := util.GetFSInfoMap("/", map[string]os.FileInfo{}) - if err := runCommandInExec(config, buildArgs, r.cmd); err != nil { + if err := runCommandInExec(config, buildArgs, r.cmd, r.rootDir, r.isolator); err != nil { return err } _, r.Files = 
util.GetFSInfoMap("/", prevFilesMap) diff --git a/pkg/commands/run_test.go b/pkg/commands/run_test.go index 0d19d94b26..a8cb0b65d0 100644 --- a/pkg/commands/run_test.go +++ b/pkg/commands/run_test.go @@ -111,11 +111,11 @@ func Test_addDefaultHOME(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { original := userLookup - userLookup = func(username string) (*user.User, error) { return test.mockUser, test.lookupError } + userLookup = func(rootDir, username string) (*user.User, error) { return test.mockUser, test.lookupError } defer func() { userLookup = original }() - actual, err := addDefaultHOME(test.user, test.initial) + actual, err := addDefaultHOME("/", test.user, test.initial) testutil.CheckErrorAndDeepEqual(t, false, err, test.expected, actual) }) } @@ -167,6 +167,8 @@ meow meow meow meow } func Test_CachingRunCommand_ExecuteCommand(t *testing.T) { + testDir := t.TempDir() + tarContent, err := prepareTarFixture(t, []string{"foo.txt"}) if err != nil { t.Errorf("couldn't prepare tar fixture %v", err) @@ -193,6 +195,7 @@ func Test_CachingRunCommand_ExecuteCommand(t *testing.T) { fakeLayer{TarContent: tarContent}, }, }, + rootDir: testDir, } count := 0 tc := testCase{ @@ -200,7 +203,7 @@ func Test_CachingRunCommand_ExecuteCommand(t *testing.T) { count: &count, expectedCount: 1, expectLayer: true, - extractedFiles: []string{"/foo.txt"}, + extractedFiles: []string{filepath.Join(testDir, "foo.txt")}, contextFiles: []string{"foo.txt"}, } c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error { @@ -211,7 +214,9 @@ func Test_CachingRunCommand_ExecuteCommand(t *testing.T) { return tc }(), func() testCase { - c := &CachingRunCommand{} + c := &CachingRunCommand{ + rootDir: testDir, + } tc := testCase{ desctiption: "with no image", expectErr: true, @@ -224,7 +229,8 @@ func Test_CachingRunCommand_ExecuteCommand(t *testing.T) { }(), func() testCase { c := &CachingRunCommand{ - img: fakeImage{}, + img: fakeImage{}, + rootDir: testDir, } 
c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error { @@ -244,6 +250,7 @@ func Test_CachingRunCommand_ExecuteCommand(t *testing.T) { fakeLayer{}, }, }, + rootDir: testDir, } c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error { return nil diff --git a/pkg/config/init.go b/pkg/config/init.go index 0ed6803589..1b7a6054a4 100644 --- a/pkg/config/init.go +++ b/pkg/config/init.go @@ -19,12 +19,11 @@ package config import ( "fmt" "os" + "path/filepath" "github.com/GoogleContainerTools/kaniko/pkg/constants" ) -var RootDir string - // KanikoDir is the path to the Kaniko directory var KanikoDir = func() string { if kd, ok := os.LookupEnv("KANIKO_DIR"); ok { @@ -42,11 +41,15 @@ var BuildContextDir = fmt.Sprintf("%s/buildcontext/", KanikoDir) // KanikoIntermediateStagesDir is where we will store intermediate stages // as tarballs in case they are needed later on -var KanikoIntermediateStagesDir = fmt.Sprintf("%s/stages/", KanikoDir) +var KanikoIntermediateStagesDir = filepath.Join(KanikoDir, "stages") + +// KanikoDependencyDir will store files that need to be +// used later on during built process +var KanikoDependencyDir = filepath.Join(KanikoDir, "saved") var IgnoreListPath string func init() { - RootDir = constants.RootDir + // RootDir = constants.RootDir IgnoreListPath = constants.IgnoreListPath } diff --git a/pkg/config/options.go b/pkg/config/options.go index b8214ca975..189903517d 100644 --- a/pkg/config/options.go +++ b/pkg/config/options.go @@ -65,6 +65,7 @@ type KanikoOptions struct { ImageNameDigestFile string ImageNameTagDigestFile string OCILayoutPath string + Isolation string ImageFSExtractRetry int SingleSnapshot bool Reproducible bool diff --git a/pkg/executor/build.go b/pkg/executor/build.go index a033f5226f..bec6159f90 100644 --- a/pkg/executor/build.go +++ b/pkg/executor/build.go @@ -43,6 +43,7 @@ import ( "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" image_util "github.com/GoogleContainerTools/kaniko/pkg/image" 
"github.com/GoogleContainerTools/kaniko/pkg/image/remote" + "github.com/GoogleContainerTools/kaniko/pkg/isolation" "github.com/GoogleContainerTools/kaniko/pkg/snapshot" "github.com/GoogleContainerTools/kaniko/pkg/timing" "github.com/GoogleContainerTools/kaniko/pkg/util" @@ -55,14 +56,17 @@ const emptyTarSize = 1024 // for testing var ( initializeConfig = initConfig + newIsolator = newIsolatorFunc ) -type cachePusher func(*config.KanikoOptions, string, string, string) error -type snapShotter interface { - Init() error - TakeSnapshotFS() (string, error) - TakeSnapshot([]string, bool, bool) (string, error) -} +type ( + cachePusher func(*config.KanikoOptions, string, string, string) error + snapShotter interface { + Init() error + TakeSnapshotFS() (string, error) + TakeSnapshot([]string, bool, bool) (string, error) + } +) // stageBuilder contains all fields necessary to build one stage of a Dockerfile type stageBuilder struct { @@ -81,10 +85,20 @@ type stageBuilder struct { snapshotter snapShotter layerCache cache.LayerCache pushLayerToCache cachePusher + isolator isolation.Isolator } // newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage -func newStageBuilder(args *dockerfile.BuildArgs, opts *config.KanikoOptions, stage config.KanikoStage, crossStageDeps map[int][]string, dcm map[string]string, sid map[string]string, stageNameToIdx map[string]string, fileContext util.FileContext) (*stageBuilder, error) { +func newStageBuilder( + args *dockerfile.BuildArgs, + opts *config.KanikoOptions, + stage config.KanikoStage, + crossStageDeps map[int][]string, + dcm map[string]string, + sid map[string]string, + stageNameToIdx map[string]string, + fileContext util.FileContext, +) (*stageBuilder, error) { sourceImage, err := image_util.RetrieveSourceImage(stage, opts) if err != nil { return nil, err @@ -99,18 +113,6 @@ func newStageBuilder(args *dockerfile.BuildArgs, opts *config.KanikoOptions, sta return nil, err } - err = 
util.InitIgnoreList(true) - if err != nil { - return nil, errors.Wrap(err, "failed to initialize ignore list") - } - - hasher, err := getHasher(opts.SnapshotMode) - if err != nil { - return nil, err - } - l := snapshot.NewLayeredMap(hasher) - snapshotter := snapshot.NewSnapshotter(l, config.RootDir) - digest, err := sourceImage.Digest() if err != nil { return nil, err @@ -119,7 +121,6 @@ func newStageBuilder(args *dockerfile.BuildArgs, opts *config.KanikoOptions, sta stage: stage, image: sourceImage, cf: imageConfig, - snapshotter: snapshotter, baseImageDigest: digest.String(), opts: opts, fileContext: fileContext, @@ -130,26 +131,40 @@ func newStageBuilder(args *dockerfile.BuildArgs, opts *config.KanikoOptions, sta Opts: opts, }, pushLayerToCache: pushLayerToCache, + isolator: newIsolator(opts.Isolation), } + if args != nil { + s.args = args.Clone() + } else { + s.args = dockerfile.NewBuildArgs(s.opts.BuildArgs) + } + s.args.AddMetaArgs(s.stage.MetaArgs) + return s, nil +} + +func (s *stageBuilder) parseCommands(rootDir string) error { for _, cmd := range s.stage.Commands { - command, err := commands.GetCommand(cmd, fileContext, opts.RunV2, opts.CacheCopyLayers) + command, err := commands.GetCommand(cmd, s.fileContext, s.opts.RunV2, s.opts.CacheCopyLayers, rootDir, s.isolator) if err != nil { - return nil, err + return err } if command == nil { continue } s.cmds = append(s.cmds, command) } + return nil +} - if args != nil { - s.args = args.Clone() - } else { - s.args = dockerfile.NewBuildArgs(s.opts.BuildArgs) +func (s *stageBuilder) setupSnapshotter(rootDir string) error { + hasher, err := getHasher(s.opts.SnapshotMode) + if err != nil { + return err } - s.args.AddMetaArgs(s.stage.MetaArgs) - return s, nil + l := snapshot.NewLayeredMap(hasher) + s.snapshotter = snapshot.NewSnapshotter(l, rootDir) + return nil } func initConfig(img partial.WithConfigFile, opts *config.KanikoOptions) (*v1.ConfigFile, error) { @@ -183,7 +198,13 @@ func initConfig(img 
partial.WithConfigFile, opts *config.KanikoOptions) (*v1.Con return imageConfig, nil } -func (s *stageBuilder) populateCompositeKey(command fmt.Stringer, files []string, compositeKey CompositeCache, args *dockerfile.BuildArgs, env []string) (CompositeCache, error) { +func (s *stageBuilder) populateCompositeKey( + command fmt.Stringer, + files []string, + compositeKey CompositeCache, + args *dockerfile.BuildArgs, + env []string, +) (CompositeCache, error) { // First replace all the environment variables or args in the command replacementEnvs := args.ReplacementEnvs(env) // The sort order of `replacementEnvs` is basically undefined, sort it @@ -215,7 +236,11 @@ func (s *stageBuilder) populateCompositeKey(command fmt.Stringer, files []string return compositeKey, nil } -func (s *stageBuilder) populateCopyCmdCompositeKey(command fmt.Stringer, from string, compositeKey CompositeCache) CompositeCache { +func (s *stageBuilder) populateCopyCmdCompositeKey( + command fmt.Stringer, + from string, + compositeKey CompositeCache, +) CompositeCache { if from != "" { digest, ok := s.stageIdxToDigest[from] if ok { @@ -235,7 +260,7 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) erro if !s.opts.Cache { return nil } - var buildArgs = s.args.Clone() + buildArgs := s.args.Clone() // Restore build args back to their original values defer func() { s.args = buildArgs @@ -271,7 +296,6 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) erro if command.ShouldCacheOutput() && !stopCache { img, err := s.layerCache.RetrieveLayer(ck) - if err != nil { logrus.Debugf("Failed to retrieve layer: %s", err) logrus.Infof("No cached layer found for cmd %s", command.String()) @@ -296,7 +320,7 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) erro return nil } -func (s *stageBuilder) build() error { +func (s *stageBuilder) build(rootDir string) error { // Set the initial cache key to be the base image digest, the build 
args and the SrcContext. var compositeKey *CompositeCache if cacheKey, ok := s.digestToCacheKey[s.baseImageDigest]; ok { @@ -314,7 +338,7 @@ func (s *stageBuilder) build() error { shouldUnpack := false for _, cmd := range s.cmds { if cmd.RequiresUnpackedFS() { - logrus.Infof("Unpacking rootfs as cmd %s requires it.", cmd.String()) + logrus.Infof("Unpacking rootfs as cmd %s requires it", cmd) shouldUnpack = true break } @@ -323,23 +347,6 @@ func (s *stageBuilder) build() error { shouldUnpack = true } - if shouldUnpack { - t := timing.Start("FS Unpacking") - - retryFunc := func() error { - _, err := util.GetFSFromImage(config.RootDir, s.image, util.ExtractFile) - return err - } - - if err := util.Retry(retryFunc, s.opts.ImageFSExtractRetry, 1000); err != nil { - return errors.Wrap(err, "failed to get filesystem from image") - } - - timing.DefaultRun.Stop(t) - } else { - logrus.Info("Skipping unpacking as no commands require it.") - } - initSnapshotTaken := false if s.opts.SingleSnapshot || s.opts.RunV2 { if err := s.initSnapshotWithTimings(); err != nil { @@ -348,94 +355,142 @@ func (s *stageBuilder) build() error { initSnapshotTaken = true } - cacheGroup := errgroup.Group{} + if shouldUnpack { + err := unpackFS(s.image, rootDir, s.opts.ImageFSExtractRetry) + if err != nil { + return fmt.Errorf("unpacking fs: %w", err) + } + } else { + logrus.Info("Skipping unpacking as no commands require it.") + } + + cacheGroup := new(errgroup.Group) for index, command := range s.cmds { if command == nil { continue } t := timing.Start("Command: " + command.String()) - - // If the command uses files from the context, add them. 
- files, err := command.FilesUsedFromContext(&s.cf.Config, s.args) + err := s.runCommand(index, command, compositeKey, t, &initSnapshotTaken, cacheGroup) if err != nil { - return errors.Wrap(err, "failed to get files used from context") + return fmt.Errorf("running command %s: %w", command, err) } + } - if s.opts.Cache { - *compositeKey, err = s.populateCompositeKey(command, files, *compositeKey, s.args, s.cf.Config.Env) - if err != nil && s.opts.Cache { - return err - } - } + if err := cacheGroup.Wait(); err != nil { + logrus.Warnf("Error uploading layer to cache: %s", err) + } + + return nil +} - logrus.Info(command.String()) +func newIsolatorFunc(isoType string) isolation.Isolator { + switch isoType { + case "chroot": + return new(isolation.Chroot) + default: + return isolation.None{} + } +} - isCacheCommand := func() bool { - switch command.(type) { - case commands.Cached: - return true - default: - return false - } - }() - if !initSnapshotTaken && !isCacheCommand && !command.ProvidesFilesToSnapshot() { - // Take initial snapshot if command does not expect to return - // a list of files. - if err := s.initSnapshotWithTimings(); err != nil { - return err - } - initSnapshotTaken = true +func (s *stageBuilder) runCommand( + index int, + cmd commands.DockerCommand, + compositeKey *CompositeCache, + t *timing.Timer, + initSnapshotTaken *bool, + cacheGroup *errgroup.Group, +) error { + // If the command uses files from the context, add them. 
+ files, err := cmd.FilesUsedFromContext(&s.cf.Config, s.args) + if err != nil { + return errors.Wrap(err, "failed to get files used from context") + } + + if s.opts.Cache { + *compositeKey, err = s.populateCompositeKey(cmd, files, *compositeKey, s.args, s.cf.Config.Env) + if err != nil && s.opts.Cache { + return err } + } + + logrus.Info(cmd.String()) - if err := command.ExecuteCommand(&s.cf.Config, s.args); err != nil { - return errors.Wrap(err, "failed to execute command") + isCacheCommand := func() bool { + switch cmd.(type) { + case commands.Cached: + return true + default: + return false + } + }() + if !*initSnapshotTaken && !isCacheCommand && !cmd.ProvidesFilesToSnapshot() { + // Take initial snapshot if command does not expect to return + // a list of files. + if err := s.initSnapshotWithTimings(); err != nil { + return err } - files = command.FilesToSnapshot() - timing.DefaultRun.Stop(t) + *initSnapshotTaken = true + } - if !s.shouldTakeSnapshot(index, command.MetadataOnly()) && !s.opts.ForceBuildMetadata { - logrus.Debugf("Build: skipping snapshot for [%v]", command.String()) - continue + if err := cmd.ExecuteCommand(&s.cf.Config, s.args); err != nil { + return errors.Wrap(err, "failed to execute command") + } + files = cmd.FilesToSnapshot() + timing.DefaultRun.Stop(t) + + if !s.shouldTakeSnapshot(index, cmd.MetadataOnly()) && !s.opts.ForceBuildMetadata { + logrus.Debugf("Build: skipping snapshot for [%v]", cmd.String()) + return nil + } + if isCacheCommand { + v := cmd.(commands.Cached) + layer := v.Layer() + if err := s.saveLayerToImage(layer, cmd.String()); err != nil { + return errors.Wrap(err, "failed to save layer") } - if isCacheCommand { - v := command.(commands.Cached) - layer := v.Layer() - if err := s.saveLayerToImage(layer, command.String()); err != nil { - return errors.Wrap(err, "failed to save layer") - } - } else { - tarPath, err := s.takeSnapshot(files, command.ShouldDetectDeletedFiles()) - if err != nil { - return errors.Wrap(err, 
"failed to take snapshot") - } + return nil + } + tarPath, err := s.takeSnapshot(files, cmd.ShouldDetectDeletedFiles()) + if err != nil { + return errors.Wrap(err, "failed to take snapshot") + } - if s.opts.Cache { - logrus.Debugf("Build: composite key for command %v %v", command.String(), compositeKey) - ck, err := compositeKey.Hash() - if err != nil { - return errors.Wrap(err, "failed to hash composite key") - } + if s.opts.Cache { + logrus.Debugf("Build: composite key for command %v %v", cmd.String(), compositeKey) + ck, err := compositeKey.Hash() + if err != nil { + return errors.Wrap(err, "failed to hash composite key") + } - logrus.Debugf("Build: cache key for command %v %v", command.String(), ck) + logrus.Debugf("Build: cache key for command %v %v", cmd.String(), ck) - // Push layer to cache (in parallel) now along with new config file - if command.ShouldCacheOutput() && !s.opts.NoPushCache { - cacheGroup.Go(func() error { - return s.pushLayerToCache(s.opts, ck, tarPath, command.String()) - }) - } - } - if err := s.saveSnapshotToImage(command.String(), tarPath); err != nil { - return errors.Wrap(err, "failed to save snapshot to image") - } + // Push layer to cache (in parallel) now along with new config file + if cmd.ShouldCacheOutput() && !s.opts.NoPushCache { + cacheGroup.Go(func() error { + return s.pushLayerToCache(s.opts, ck, tarPath, cmd.String()) + }) } } + if err := s.saveSnapshotToImage(cmd.String(), tarPath); err != nil { + return errors.Wrap(err, "failed to save snapshot to image") + } + return nil +} - if err := cacheGroup.Wait(); err != nil { - logrus.Warnf("Error uploading layer to cache: %s", err) +func unpackFS(image v1.Image, rootDir string, retryCount int) error { + t := timing.Start("FS Unpacking") + + retryFunc := func() error { + _, err := util.GetFSFromImage(rootDir, image, util.ExtractFile) + return err } + if err := util.Retry(retryFunc, retryCount, 1000); err != nil { + return errors.Wrap(err, "failed to get filesystem from image") + 
} + + timing.DefaultRun.Stop(t) return nil } @@ -510,6 +565,7 @@ func (s *stageBuilder) saveSnapshotToLayer(tarPath string) (v1.Layer, error) { return layer, nil } + func (s *stageBuilder) saveLayerToImage(layer v1.Layer, createdBy string) error { var err error s.image, err = mutate.Append(s.image, @@ -524,7 +580,11 @@ func (s *stageBuilder) saveLayerToImage(layer v1.Layer, createdBy string) error return err } -func CalculateDependencies(stages []config.KanikoStage, opts *config.KanikoOptions, stageNameToIdx map[string]string) (map[int][]string, error) { +func CalculateDependencies( + stages []config.KanikoStage, + opts *config.KanikoOptions, + stageNameToIdx map[string]string, +) (map[int][]string, error) { images := []v1.Image{} depGraph := map[int][]string{} for _, s := range stages { @@ -587,6 +647,14 @@ func CalculateDependencies(stages []config.KanikoStage, opts *config.KanikoOptio return depGraph, nil } +func createNeededBuildDirs() error { + err := os.MkdirAll(config.KanikoDependencyDir, 0755) + if err != nil { + return err + } + return os.MkdirAll(config.KanikoIntermediateStagesDir, 0755) +} + // DoBuild executes building the Dockerfile func DoBuild(opts *config.KanikoOptions) (v1.Image, error) { t := timing.Start("Total Build Time") @@ -609,6 +677,11 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) { return nil, err } + err = createNeededBuildDirs() + if err != nil { + return nil, fmt.Errorf("creating needed directories: %w", err) + } + // Some stages may refer to other random images, not previous stages if err := fetchExtraStages(kanikoStages, opts); err != nil { return nil, err @@ -621,15 +694,15 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) { var args *dockerfile.BuildArgs - for index, stage := range kanikoStages { - + for _, stage := range kanikoStages { sb, err := newStageBuilder( args, opts, stage, crossStageDependencies, digestToCacheKey, stageIdxToDigest, stageNameToIdx, - fileContext) + fileContext, + ) 
logrus.Infof("Building stage '%v' [idx: '%v', base-idx: '%v']", stage.BaseName, stage.Index, stage.BaseImageIndex) @@ -638,116 +711,152 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) { return nil, err } args = sb.args - if err := sb.build(); err != nil { - return nil, errors.Wrap(err, "error building stage") - } - - reviewConfig(stage, &sb.cf.Config) - - sourceImage, err := mutate.Config(sb.image, sb.cf.Config) - if err != nil { - return nil, err - } - - configFile, err := sourceImage.ConfigFile() + img, err := runStage(stage, sb) if err != nil { return nil, err } - if opts.CustomPlatform == "" { - configFile.OS = runtime.GOOS - configFile.Architecture = runtime.GOARCH - } else { - configFile.OS = strings.Split(opts.CustomPlatform, "/")[0] - configFile.Architecture = strings.Split(opts.CustomPlatform, "/")[1] - } - sourceImage, err = mutate.ConfigFile(sourceImage, configFile) - if err != nil { - return nil, err + if img != nil { + // last stage + timing.DefaultRun.Stop(t) + return img, nil } - d, err := sourceImage.Digest() - if err != nil { - return nil, err - } + } + + return nil, err +} + +func runStage(stage config.KanikoStage, sb *stageBuilder) (v1.Image, error) { + newRoot, err := sb.isolator.NewRoot() + if err != nil { + return nil, fmt.Errorf("getting new root dir from isolator: %w", err) + } + + // initIgnoreList after isolation, because isolation could create new mounts that need to be respected + err = util.InitIgnoreList(true) + if err != nil { + return nil, errors.Wrap(err, "failed to initialize ignore list") + } + + err = sb.parseCommands(newRoot) + if err != nil { + return nil, fmt.Errorf("parsing commands: %w", err) + } + + err = sb.setupSnapshotter(newRoot) + if err != nil { + return nil, fmt.Errorf("setup snapshotter: %w", err) + } + + if err := sb.build(newRoot); err != nil { + return nil, errors.Wrap(err, "error building stage") + } + + reviewConfig(stage, &sb.cf.Config) + + sourceImage, err := mutate.Config(sb.image, 
sb.cf.Config) + if err != nil { + return nil, err + } + + configFile, err := sourceImage.ConfigFile() + if err != nil { + return nil, err + } + if sb.opts.CustomPlatform == "" { + configFile.OS = runtime.GOOS + configFile.Architecture = runtime.GOARCH + } else { + configFile.OS = strings.Split(sb.opts.CustomPlatform, "/")[0] + configFile.Architecture = strings.Split(sb.opts.CustomPlatform, "/")[1] + } + sourceImage, err = mutate.ConfigFile(sourceImage, configFile) + if err != nil { + return nil, err + } + + d, err := sourceImage.Digest() + if err != nil { + return nil, err + } - stageIdxToDigest[fmt.Sprintf("%d", sb.stage.Index)] = d.String() - logrus.Debugf("Mapping stage idx %v to digest %v", sb.stage.Index, d.String()) + sb.stageIdxToDigest[fmt.Sprintf("%d", sb.stage.Index)] = d.String() + logrus.Debugf("Mapping stage idx %v to digest %v", sb.stage.Index, d.String()) - digestToCacheKey[d.String()] = sb.finalCacheKey - logrus.Debugf("Mapping digest %v to cachekey %v", d.String(), sb.finalCacheKey) + sb.digestToCacheKey[d.String()] = sb.finalCacheKey + logrus.Debugf("Mapping digest %v to cachekey %v", d.String(), sb.finalCacheKey) - if stage.Final { - sourceImage, err = mutate.CreatedAt(sourceImage, v1.Time{Time: time.Now()}) + if stage.Final { + sourceImage, err = mutate.CreatedAt(sourceImage, v1.Time{Time: time.Now()}) + if err != nil { + return nil, err + } + if sb.opts.Reproducible { + sourceImage, err = mutate.Canonical(sourceImage) if err != nil { return nil, err } - if opts.Reproducible { - sourceImage, err = mutate.Canonical(sourceImage) - if err != nil { - return nil, err - } - } - if opts.Cleanup { - if err = util.DeleteFilesystem(); err != nil { - return nil, err - } - } - timing.DefaultRun.Stop(t) - return sourceImage, nil } - if stage.SaveStage { - if err := saveStageAsTarball(strconv.Itoa(index), sourceImage); err != nil { + if sb.opts.Cleanup { + if err = util.DeleteFilesystem(newRoot); err != nil { return nil, err } } - - filesToSave, err := 
filesToSave(crossStageDependencies[index]) - if err != nil { + return sourceImage, nil + } + if stage.SaveStage { + if err := saveStageAsTarball(strconv.Itoa(stage.Index), sourceImage); err != nil { return nil, err } - dstDir := filepath.Join(config.KanikoDir, strconv.Itoa(index)) - if err := os.MkdirAll(dstDir, 0644); err != nil { + } + + filesToSave, err := filesToSave(newRoot, sb.crossStageDeps[stage.Index]) + if err != nil { + return nil, err + } + dstDir := filepath.Join(config.KanikoDependencyDir, strconv.Itoa(stage.Index)) + if err := os.MkdirAll(dstDir, 0755); err != nil { + if !errors.Is(err, os.ErrExist) { return nil, errors.Wrap(err, fmt.Sprintf("to create workspace for stage %s", - stageIdxToDigest[strconv.Itoa(index)], + sb.stageIdxToDigest[strconv.Itoa(stage.Index)], )) } - for _, p := range filesToSave { - logrus.Infof("Saving file %s for later use", p) - if err := util.CopyFileOrSymlink(p, dstDir, config.RootDir); err != nil { - return nil, errors.Wrap(err, "could not save file") - } - } - - // Delete the filesystem - if err := util.DeleteFilesystem(); err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("deleting file system after stage %d", index)) + } + for _, p := range filesToSave { + logrus.Infof("Saving file %s for later use", p) + if err := util.CopyFileOrSymlink(p, dstDir, newRoot); err != nil { + return nil, errors.Wrap(err, "could not save file") } } - return nil, err + // Delete the filesystem + if err := util.DeleteFilesystem(newRoot); err != nil { + return nil, fmt.Errorf("deleting file system after stage %d: %w", stage.Index, err) + } + return nil, nil } // fileToSave returns all the files matching the given pattern in deps. // If a file is a symlink, it also returns the target file. 
-func filesToSave(deps []string) ([]string, error) { +func filesToSave(rootDir string, deps []string) ([]string, error) { srcFiles := []string{} for _, src := range deps { - srcs, err := filepath.Glob(filepath.Join(config.RootDir, src)) + srcs, err := filepath.Glob(filepath.Join(rootDir, src)) if err != nil { return nil, err } for _, f := range srcs { if link, err := util.EvalSymLink(f); err == nil { - link, err = filepath.Rel(config.RootDir, link) + link, err = filepath.Rel(rootDir, link) if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("could not find relative path to %s", config.RootDir)) + return nil, errors.Wrap(err, fmt.Sprintf("could not find relative path to %s", rootDir)) } srcFiles = append(srcFiles, link) } - f, err = filepath.Rel(config.RootDir, f) + f, err = filepath.Rel(rootDir, f) if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("could not find relative path to %s", config.RootDir)) + return nil, errors.Wrap(err, fmt.Sprintf("could not find relative path to %s", rootDir)) } srcFiles = append(srcFiles, f) } @@ -822,7 +931,7 @@ func fromPreviousStage(copyCommand *instructions.CopyCommand, previousStageNames func extractImageToDependencyDir(name string, image v1.Image) error { t := timing.Start("Extracting Image to Dependency Dir") defer timing.DefaultRun.Stop(t) - dependencyDir := filepath.Join(config.KanikoDir, name) + dependencyDir := filepath.Join(config.KanikoDependencyDir, name) if err := os.MkdirAll(dependencyDir, 0755); err != nil { return err } diff --git a/pkg/executor/build_test.go b/pkg/executor/build_test.go index 678e747f81..c22ca92221 100644 --- a/pkg/executor/build_test.go +++ b/pkg/executor/build_test.go @@ -31,6 +31,7 @@ import ( "github.com/GoogleContainerTools/kaniko/pkg/commands" "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/isolation" "github.com/GoogleContainerTools/kaniko/pkg/util" 
"github.com/GoogleContainerTools/kaniko/testutil" "github.com/containerd/containerd/platforms" @@ -433,12 +434,6 @@ func Test_filesToSave(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tmpDir := t.TempDir() - original := config.RootDir - config.RootDir = tmpDir - defer func() { - config.RootDir = original - }() - for _, f := range tt.files { p := filepath.Join(tmpDir, f) dir := filepath.Dir(p) @@ -454,7 +449,7 @@ func Test_filesToSave(t *testing.T) { fp.Close() } - got, err := filesToSave(tt.args) + got, err := filesToSave(tmpDir, tt.args) if err != nil { t.Errorf("got err: %s", err) } @@ -913,7 +908,7 @@ COPY %s foo.txt expectedCacheKeys: []string{copyCommandCacheKey}, // CachingCopyCommand is not pushed to the cache pushedCacheKeys: []string{}, - commands: getCommands(util.FileContext{Root: dir}, cmds, true), + commands: getCommands(util.FileContext{Root: dir}, cmds, true, dir), fileName: filename, } }(), @@ -970,7 +965,7 @@ COPY %s foo.txt rootDir: dir, expectedCacheKeys: []string{hash}, pushedCacheKeys: []string{hash}, - commands: getCommands(util.FileContext{Root: dir}, cmds, true), + commands: getCommands(util.FileContext{Root: dir}, cmds, true, dir), fileName: filename, } }(), @@ -1045,7 +1040,7 @@ COPY %s bar.txt // hash1 is the read cachekey for the first layer expectedCacheKeys: []string{hash1, hash2}, pushedCacheKeys: []string{hash2}, - commands: getCommands(util.FileContext{Root: dir}, cmds, true), + commands: getCommands(util.FileContext{Root: dir}, cmds, true, dir), } }(), func() testcase { @@ -1118,7 +1113,7 @@ RUN foobar image: image, expectedCacheKeys: []string{runHash}, pushedCacheKeys: []string{}, - commands: getCommands(util.FileContext{Root: dir}, cmds, false), + commands: getCommands(util.FileContext{Root: dir}, cmds, false, dir), } }(), func() testcase { @@ -1290,11 +1285,7 @@ RUN foobar for key, value := range tc.args { sb.args.AddArg(key, &value) } - tmp := config.RootDir - if tc.rootDir != "" { - 
config.RootDir = tc.rootDir - } - err := sb.build() + err := sb.build(tc.rootDir) if err != nil { t.Errorf("Expected error to be nil but was %v", err) } @@ -1302,8 +1293,6 @@ RUN foobar assertCacheKeys(t, tc.expectedCacheKeys, lc.receivedKeys, "receive") assertCacheKeys(t, tc.pushedCacheKeys, keys, "push") - config.RootDir = tmp - }) } } @@ -1331,7 +1320,7 @@ func assertCacheKeys(t *testing.T, expectedCacheKeys, actualCacheKeys []string, } } -func getCommands(fileContext util.FileContext, cmds []instructions.Command, cacheCopy bool) []commands.DockerCommand { +func getCommands(fileContext util.FileContext, cmds []instructions.Command, cacheCopy bool, rootDir string) []commands.DockerCommand { outCommands := make([]commands.DockerCommand, 0) for _, c := range cmds { cmd, err := commands.GetCommand( @@ -1339,6 +1328,8 @@ func getCommands(fileContext util.FileContext, cmds []instructions.Command, cach fileContext, false, cacheCopy, + rootDir, + isolation.None{}, ) if err != nil { panic(err) @@ -1434,7 +1425,7 @@ func Test_stageBuild_populateCompositeKeyForCopyCommand(t *testing.T) { } fc := util.FileContext{Root: "workspace"} - copyCommand, err := commands.GetCommand(instructions[0], fc, false, true) + copyCommand, err := commands.GetCommand(instructions[0], fc, false, true, "", isolation.None{}) if err != nil { t.Fatal(err) } diff --git a/pkg/executor/copy_multistage_test.go b/pkg/executor/copy_multistage_test.go index 3d13da6c5f..e21532a4c4 100644 --- a/pkg/executor/copy_multistage_test.go +++ b/pkg/executor/copy_multistage_test.go @@ -24,13 +24,25 @@ import ( "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/constants" + "github.com/GoogleContainerTools/kaniko/pkg/isolation" "github.com/GoogleContainerTools/kaniko/testutil" ) func TestCopyCommand_Multistage(t *testing.T) { + t.Run("copy a file across multistage", func(t *testing.T) { testDir, fn := setupMultistageTests(t) defer fn() + + // override isolation creator 
func + original := newIsolator + newIsolator = func(isoType string) isolation.Isolator { + return fakeIsolator{dir: testDir} + } + defer func() { + newIsolator = original + }() + dockerFile := fmt.Sprintf(` FROM scratch as first COPY foo/bam.txt copied/ @@ -45,7 +57,9 @@ COPY --from=first copied/bam.txt output/bam.txt`) SnapshotMode: constants.SnapshotModeFull, } _, err := DoBuild(opts) - testutil.CheckNoError(t, err) + if err != nil { + t.Fatal(err) + } // Check Image has one layer bam.txt files, err := ioutil.ReadDir(filepath.Join(testDir, "output")) if err != nil { @@ -59,6 +73,16 @@ COPY --from=first copied/bam.txt output/bam.txt`) t.Run("copy a file across multistage into a directory", func(t *testing.T) { testDir, fn := setupMultistageTests(t) defer fn() + + // override isolation creator func + original := newIsolator + newIsolator = func(isoType string) isolation.Isolator { + return fakeIsolator{dir: testDir} + } + defer func() { + newIsolator = original + }() + dockerFile := fmt.Sprintf(` FROM scratch as first COPY foo/bam.txt copied/ @@ -73,7 +97,9 @@ COPY --from=first copied/bam.txt output/`) SnapshotMode: constants.SnapshotModeFull, } _, err := DoBuild(opts) - testutil.CheckNoError(t, err) + if err != nil { + t.Fatal(err) + } files, err := ioutil.ReadDir(filepath.Join(testDir, "output")) if err != nil { t.Fatal(err) @@ -84,6 +110,16 @@ COPY --from=first copied/bam.txt output/`) t.Run("copy directory across multistage into a directory", func(t *testing.T) { testDir, fn := setupMultistageTests(t) defer fn() + + // override isolation creator func + original := newIsolator + newIsolator = func(isoType string) isolation.Isolator { + return fakeIsolator{dir: testDir} + } + defer func() { + newIsolator = original + }() + dockerFile := fmt.Sprintf(` FROM scratch as first COPY foo copied @@ -98,7 +134,9 @@ COPY --from=first copied another`) SnapshotMode: constants.SnapshotModeFull, } _, err := DoBuild(opts) - testutil.CheckNoError(t, err) + if err != nil { + 
t.Fatal(err) + } // Check Image has one layer bam.txt files, err := ioutil.ReadDir(filepath.Join(testDir, "another")) if err != nil { @@ -133,11 +171,17 @@ func setupMultistageTests(t *testing.T) (string, func()) { // - exec.link -> ../exec // exec - // Make directory for stage or else the executor will create with permissions 0664 - // and we will run into issue https://github.com/golang/go/issues/22323 - if err := os.MkdirAll(filepath.Join(testDir, "kaniko/0"), 0755); err != nil { + originalKanikoDepsDir := config.KanikoDependencyDir + originalKanikoStagesDir := config.KanikoIntermediateStagesDir + config.KanikoDependencyDir = filepath.Join(testDir, "kaniko", "saved") + config.KanikoIntermediateStagesDir = filepath.Join(testDir, "kaniko", "stages") + + kanikoDir := filepath.Join(testDir, "kaniko") + // Make kanikoDir + if err := os.MkdirAll(kanikoDir, 0755); err != nil { t.Fatal(err) } + workspace := filepath.Join(testDir, "workspace") // Make foo if err := os.MkdirAll(filepath.Join(workspace, "foo"), 0755); err != nil { @@ -161,7 +205,6 @@ func setupMultistageTests(t *testing.T) (string, func()) { os.Symlink("../exec", filepath.Join(workspace, "bin", "exec.link")) // set up config - config.RootDir = testDir config.KanikoDir = fmt.Sprintf("%s/%s", testDir, "kaniko") // Write path to ignore list if err := os.MkdirAll(filepath.Join(testDir, "proc"), 0755); err != nil { @@ -177,7 +220,8 @@ func setupMultistageTests(t *testing.T) (string, func()) { } config.IgnoreListPath = mFile return testDir, func() { - config.RootDir = constants.RootDir config.IgnoreListPath = constants.IgnoreListPath + config.KanikoIntermediateStagesDir = originalKanikoStagesDir + config.KanikoDependencyDir = originalKanikoDepsDir } } diff --git a/pkg/executor/fakes.go b/pkg/executor/fakes.go index 4b8dec1c6a..fc8374df55 100644 --- a/pkg/executor/fakes.go +++ b/pkg/executor/fakes.go @@ -22,6 +22,7 @@ import ( "errors" "io" "io/ioutil" + "os/exec" 
"github.com/GoogleContainerTools/kaniko/pkg/commands" "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" @@ -195,3 +196,17 @@ func (f fakeImage) LayerByDigest(v1.Hash) (v1.Layer, error) { func (f fakeImage) LayerByDiffID(v1.Hash) (v1.Layer, error) { return fakeLayer{}, nil } + +// fakeIsolator isolator is used for testing +// fakeIsolator.dir will be used as the new root +type fakeIsolator struct { + dir string +} + +func (t fakeIsolator) NewRoot() (newRoot string, err error) { + return t.dir, err +} + +func (t fakeIsolator) ExecRunCommand(cmd *exec.Cmd) error { + return nil +} diff --git a/pkg/filesystem/resolve.go b/pkg/filesystem/resolve.go index 648f13020e..ba17c94d8c 100644 --- a/pkg/filesystem/resolve.go +++ b/pkg/filesystem/resolve.go @@ -20,7 +20,6 @@ import ( "os" "path/filepath" - "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/util" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -34,7 +33,7 @@ import ( // * If path is a symlink, resolve it's target. If the target is not ignored add it to the // output set. // * Add all ancestors of each path to the output set. -func ResolvePaths(paths []string, wl []util.IgnoreListEntry) (pathsToAdd []string, err error) { +func ResolvePaths(rootDir string, paths []string, wl []util.IgnoreListEntry) (pathsToAdd []string, err error) { logrus.Tracef("Resolving paths %s", paths) fileSet := make(map[string]bool) @@ -46,7 +45,7 @@ func ResolvePaths(paths []string, wl []util.IgnoreListEntry) (pathsToAdd []strin continue } - link, e := resolveSymlinkAncestor(f) + link, e := resolveSymlinkAncestor(rootDir, f) if e != nil { continue } @@ -92,20 +91,20 @@ func ResolvePaths(paths []string, wl []util.IgnoreListEntry) (pathsToAdd []strin } // Also add parent directories to keep the permission of them correctly. 
- pathsToAdd = filesWithParentDirs(pathsToAdd) + pathsToAdd = filesWithParentDirs(rootDir, pathsToAdd) return } // filesWithParentDirs returns every ancestor path for each provided file path. // I.E. /foo/bar/baz/boom.txt => [/, /foo, /foo/bar, /foo/bar/baz, /foo/bar/baz/boom.txt] -func filesWithParentDirs(files []string) []string { +func filesWithParentDirs(rootDir string, files []string) []string { filesSet := map[string]bool{} for _, file := range files { file = filepath.Clean(file) filesSet[file] = true - for _, dir := range util.ParentDirectories(file) { + for _, dir := range util.ParentDirectories(rootDir, file) { dir = filepath.Clean(dir) filesSet[dir] = true } @@ -124,7 +123,7 @@ func filesWithParentDirs(files []string) []string { // E.G /baz/boom/bar.txt links to /usr/bin/bar.txt but /baz/boom/bar.txt itself is not a link. // Instead /bar/boom is actually a link to /usr/bin. In this case resolveSymlinkAncestor would // return /bar/boom. -func resolveSymlinkAncestor(path string) (string, error) { +func resolveSymlinkAncestor(rootDir, path string) (string, error) { if !filepath.IsAbs(path) { return "", errors.New("dest path must be abs") } @@ -133,7 +132,7 @@ func resolveSymlinkAncestor(path string) (string, error) { newPath := filepath.Clean(path) loop: - for newPath != config.RootDir { + for newPath != rootDir { fi, err := os.Lstat(newPath) if err != nil { return "", errors.Wrap(err, "resolvePaths: failed to lstat") diff --git a/pkg/filesystem/resolve_test.go b/pkg/filesystem/resolve_test.go index 751bd2f6cc..21ee90f53c 100644 --- a/pkg/filesystem/resolve_test.go +++ b/pkg/filesystem/resolve_test.go @@ -95,9 +95,9 @@ func Test_ResolvePaths(t *testing.T) { expectedFiles = append(expectedFiles, target) } - expectedFiles = filesWithParentDirs(expectedFiles) + expectedFiles = filesWithParentDirs(dir, expectedFiles) - files, err := ResolvePaths(inputFiles, wl) + files, err := ResolvePaths(dir, inputFiles, wl) validateResults(t, files, expectedFiles, err) }) @@ 
-159,9 +159,9 @@ func Test_ResolvePaths(t *testing.T) { targetFile := filepath.Join(target, "meow.txt") expectedFiles = append(expectedFiles, targetFile) - expectedFiles = filesWithParentDirs(expectedFiles) + expectedFiles = filesWithParentDirs(dir, expectedFiles) - files, err := ResolvePaths(inputFiles, wl) + files, err := ResolvePaths(dir, inputFiles, wl) validateResults(t, files, expectedFiles, err) }) @@ -169,12 +169,13 @@ func Test_ResolvePaths(t *testing.T) { }) t.Run("empty set of files", func(t *testing.T) { + dir := t.TempDir() inputFiles := []string{} expectedFiles := []string{} wl := []util.IgnoreListEntry{} - files, err := ResolvePaths(inputFiles, wl) + files, err := ResolvePaths(dir, inputFiles, wl) validateResults(t, files, expectedFiles, err) }) @@ -217,7 +218,7 @@ func Test_resolveSymlinkAncestor(t *testing.T) { expected := linkPath - actual, err := resolveSymlinkAncestor(linkPath) + actual, err := resolveSymlinkAncestor(testDir, linkPath) if err != nil { t.Errorf("expected err to be nil but was %s", err) } @@ -238,7 +239,7 @@ func Test_resolveSymlinkAncestor(t *testing.T) { expected := linkDir - actual, err := resolveSymlinkAncestor(fmt.Sprintf("%s/", linkDir)) + actual, err := resolveSymlinkAncestor(testDir, fmt.Sprintf("%s/", linkDir)) if err != nil { t.Errorf("expected err to be nil but was %s", err) } @@ -270,7 +271,7 @@ func Test_resolveSymlinkAncestor(t *testing.T) { expected := linkPath - actual, err := resolveSymlinkAncestor(linkPath) + actual, err := resolveSymlinkAncestor(testDir, linkPath) if err != nil { t.Errorf("expected err to be nil but was %s", err) } @@ -286,7 +287,7 @@ func Test_resolveSymlinkAncestor(t *testing.T) { expected := targetPath - actual, err := resolveSymlinkAncestor(targetPath) + actual, err := resolveSymlinkAncestor(testDir, targetPath) if err != nil { t.Errorf("expected err to be nil but was %s", err) } @@ -318,7 +319,7 @@ func Test_resolveSymlinkAncestor(t *testing.T) { expected := linkDir - actual, err := 
resolveSymlinkAncestor(linkPath) + actual, err := resolveSymlinkAncestor(testDir, linkPath) if err != nil { t.Errorf("expected err to be nil but was %s", err) } @@ -352,7 +353,7 @@ func Test_resolveSymlinkAncestor(t *testing.T) { linkPath := filepath.Join(linkDir, filepath.Base(targetPath)) - _, err := resolveSymlinkAncestor(linkPath) + _, err := resolveSymlinkAncestor(testDir, linkPath) if err == nil { t.Error("expected err to not be nil") } @@ -380,7 +381,7 @@ func Test_resolveSymlinkAncestor(t *testing.T) { expected := linkDir - actual, err := resolveSymlinkAncestor(linkPath) + actual, err := resolveSymlinkAncestor(testDir, linkPath) if err != nil { t.Errorf("expected err to be nil but was %s", err) } diff --git a/pkg/idtools/mappings.go b/pkg/idtools/mappings.go new file mode 100644 index 0000000000..57cee6ca78 --- /dev/null +++ b/pkg/idtools/mappings.go @@ -0,0 +1,15 @@ +package idtools + +const ( + subuidFile = "/etc/subuid" + subgidFile = "/etc/subgid" +) + +type Mapping struct { + // ContainerID is the starting ID in the user namespace + ContainerID uint32 + // HostID is the starting ID outside of the user namespace + HostID uint32 + // Size is the number of IDs that can be mapped on top of ContainerID + Size uint32 +} diff --git a/pkg/idtools/mappings_linux.go b/pkg/idtools/mappings_linux.go new file mode 100644 index 0000000000..c51e78c651 --- /dev/null +++ b/pkg/idtools/mappings_linux.go @@ -0,0 +1,225 @@ +//go:build linux +// +build linux + +package idtools + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "os/exec" + "strconv" + "strings" + + "github.com/sirupsen/logrus" + "github.com/syndtr/gocapability/capability" +) + +func hasSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) { + info, err := os.Stat(path) + if err != nil { + return false, err + } + + mode := info.Mode() + if mode&modeid == modeid { + return true, nil + } + cap, err := capability.NewFile2(path) + if err != nil { + return false, err + } + if 
err := cap.Load(); err != nil { + return false, err + } + return cap.Get(capability.EFFECTIVE, capid), nil +} + +// SetUidMap executes newuidmap with mapping defined in uidmap +func SetUidMap(pid int, uidmap []Mapping) error { + path, err := exec.LookPath("newuidmap") + if err != nil { + return fmt.Errorf("finding newuidmap: %w", err) + } + err = runNewIDMap( + path, + fmt.Sprintf("%d", pid), + uidmap, + ) + if err != nil { + ok, err := hasSetID(path, os.ModeSetuid, capability.CAP_SETUID) + if err != nil { + return fmt.Errorf("determining if %v has setuid cap: %w", path, err) + } + if !ok { + return fmt.Errorf("%v failed because setuid was not set on the file nor had the capability", path) + } + } + return nil +} + +// SetGidMap executes newgidmap with mapping defined in gidmap +func SetGidMap(pid int, gidmap []Mapping) error { + path, err := exec.LookPath("newgidmap") + if err != nil { + return fmt.Errorf("finding newgidmap: %w", err) + } + err = runNewIDMap( + path, + fmt.Sprintf("%d", pid), + gidmap, + ) + if err != nil { + ok, err := hasSetID(path, os.ModeSetgid, capability.CAP_SETGID) + if err != nil { + return fmt.Errorf("determining if %v has Setgid cap: %w", path, err) + } + if !ok { + return fmt.Errorf("%v failed because Setgid was not set on the file", path) + } + } + return nil +} + +func runNewIDMap(path, pid string, mappings []Mapping) error { + // newuidmap and newgidmap are only allowed once per process + var mapStr string + for _, m := range mappings { + mapStr += fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + args := []string{ + pid, + } + // replace \n with " " because newuidmap expects it that way + args = append(args, strings.Fields(strings.ReplaceAll(mapStr, "\n", " "))...) + + cmd := exec.Command(path, args...)
+ logrus.Infof("running %s", cmd) + + output := new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = output, output + err := cmd.Run() + if err != nil { + return fmt.Errorf("%v failed: %s %w", path, output.String(), err) + } + return nil +} + +// getHostIDMappings reads mappings from the named node under /proc. +func getHostIDMappings(path string) ([]Mapping, error) { + var mappings []Mapping + f, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("reading ID mappings from %q: %w", path, err) + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + fields := strings.Fields(line) + if len(fields) != 3 { + return nil, fmt.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields)) + } + cid, err := strconv.ParseUint(fields[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("parsing container ID value %q from line %q in %q: %w", fields[0], line, path, err) + } + hid, err := strconv.ParseUint(fields[1], 10, 32) + if err != nil { + return nil, fmt.Errorf("parsing host ID value %q from line %q in %q: %w", fields[1], line, path, err) + } + size, err := strconv.ParseUint(fields[2], 10, 32) + if err != nil { + return nil, fmt.Errorf("parsing size value %q from line %q in %q: %w", fields[2], line, path, err) + } + mappings = append(mappings, Mapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)}) + } + return mappings, nil +} + +// GetHostIDMappings reads mappings for the specified process (or the current +// process if pid is "self" or an empty string) from the kernel. 
+func GetHostIDMappings(pid string) ([]Mapping, []Mapping, error) { + if pid == "" { + pid = "self" + } + uidmapPath := fmt.Sprintf("/proc/%v/uid_map", pid) + uidmap, err := getHostIDMappings(uidmapPath) + if err != nil { + return nil, nil, err + } + gidmapPath := fmt.Sprintf("/proc/%v/gid_map", pid) + gidmap, err := getHostIDMappings(gidmapPath) + if err != nil { + return nil, nil, err + } + return uidmap, gidmap, nil +} + +// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid. +func GetSubIDMappings(uid, gid uint32, user, group string) ([]Mapping, []Mapping, error) { + return newIDMappings(uid, gid, user, group) +} + +func newIDMappings(uid, gid uint32, username, group string) (uidmap []Mapping, gidmap []Mapping, err error) { + uidFile, err := os.Open(subuidFile) + if err != nil { + return uidmap, gidmap, err + } + defer uidFile.Close() + uidmap, err = getMappingFromSubFile(uid, username, uidFile) + if err != nil { + return uidmap, gidmap, fmt.Errorf("get mapping from %v for user %v: %w", subuidFile, username, err) + } + + gidFile, err := os.Open(subgidFile) + if err != nil { + return uidmap, gidmap, err + } + defer gidFile.Close() + gidmap, err = getMappingFromSubFile(gid, group, gidFile) + if err != nil { + return uidmap, gidmap, fmt.Errorf("get mapping from %v for group %v: %w", subgidFile, group, err) + } + return +} + +func getMappingFromSubFile(uidOrGid uint32, userOrGroup string, r io.Reader) ([]Mapping, error) { + // /etc/sub{uid,gid} is of the following format + // USERNAME_OR_GROUP:START_UID_IN_USERNAMESPACE:SIZE + scanner := bufio.NewScanner(r) + maps := []Mapping{} + for { + if !scanner.Scan() { + break + } + line := scanner.Text() + if line == "" { + // skip empty lines + continue + } + parts := strings.Split(line, ":") + if len(parts) != 3 { + return nil, errors.New("content of reader is in wrong format") + } + if parts[0] == userOrGroup || userOrGroup == "ALL" { + containerID, err := strconv.Atoi(parts[1]) + if err != nil { + return
nil, err + } + size, err := strconv.Atoi(parts[2]) + if err != nil { + return nil, err + } + maps = append(maps, Mapping{ + HostID: uint32(containerID), + ContainerID: uidOrGid, + Size: uint32(size), + }) + } + } + return maps, nil +} diff --git a/pkg/idtools/mappings_linux_test.go b/pkg/idtools/mappings_linux_test.go new file mode 100644 index 0000000000..0eaa98ed48 --- /dev/null +++ b/pkg/idtools/mappings_linux_test.go @@ -0,0 +1,81 @@ +//go:build linux +// +build linux + +package idtools + +import ( + "bytes" + "reflect" + "testing" +) + +func Test_getMappingFromSubFile(t *testing.T) { + type args struct { + uidOrGid uint32 + userOrGroup string + idFileContent string + } + tests := []struct { + name string + args args + want []Mapping + wantErr bool + }{ + { + name: "default id file", + args: args{ + uidOrGid: 0, + userOrGroup: "foo", + idFileContent: sampleIDContent, + }, + want: []Mapping{ + { + ContainerID: 0, + HostID: 100000, + Size: 65536, + }, + }, + wantErr: false, + }, + { + name: "user not in file", + args: args{ + uidOrGid: 0, + userOrGroup: "baz", + idFileContent: sampleIDContent, + }, + wantErr: false, + want: []Mapping{}, + }, + { + name: "malformed id file", + args: args{ + uidOrGid: 0, + userOrGroup: "foo", + idFileContent: malformedIDContent, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + reader := bytes.NewReader([]byte(tt.args.idFileContent)) + got, err := getMappingFromSubFile(tt.args.uidOrGid, tt.args.userOrGroup, reader) + if (err != nil) != tt.wantErr { + t.Errorf("getMappingFromSubFile() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("getMappingFromSubFile() = %v, want %v", got, tt.want) + } + }) + } +} + +var sampleIDContent = ` +foo:100000:65536 +` + +var malformedIDContent = ` +:100000:::65536 +` diff --git a/pkg/idtools/mappings_unsupported.go b/pkg/idtools/mappings_unsupported.go new file mode 100644 index
0000000000..b419e84687 --- /dev/null +++ b/pkg/idtools/mappings_unsupported.go @@ -0,0 +1,18 @@ +//go:build !linux +// +build !linux + +package idtools + +import "errors" + +func SetUidMap(pid int, uidmap []Mapping) error { + return errors.New("SetUidMap is only supported on linux") +} + +func SetGidMap(pid int, gidmap []Mapping) error { + return errors.New("SetGidMap is only supported on linux") +} + +func GetSubIDMappings(uid, gid uint32, user, group string) ([]Mapping, []Mapping, error) { + return nil, nil, errors.New("GetSubIDMappings is only supported on linux") +} diff --git a/pkg/isolation/isolation.go b/pkg/isolation/isolation.go new file mode 100644 index 0000000000..f11a9f0c7e --- /dev/null +++ b/pkg/isolation/isolation.go @@ -0,0 +1,68 @@ +package isolation + +import ( + "fmt" + "os/exec" + "syscall" + + "github.com/GoogleContainerTools/kaniko/pkg/chroot" + "github.com/pkg/errors" +) + +type Isolator interface { + NewRoot() (newRoot string, err error) + ExecRunCommand(cmd *exec.Cmd) error +} + +type Chroot struct { + // rootDir holds the dir created by NewRoot + rootDir string +} + +var _ Isolator = &Chroot{} + +func (c *Chroot) NewRoot() (newRoot string, err error) { + newRoot, err = chroot.TmpDirInHome() + if err != nil { + return "", fmt.Errorf("getting newRoot: %w", err) + } + c.rootDir = newRoot + return newRoot, nil +} + +func (c *Chroot) ExecRunCommand(cmd *exec.Cmd) (err error) { + if c.rootDir == "" { + return errors.New("NewRoot() was not executed beforehand") + } + err = chroot.Run(cmd, c.rootDir) + if err != nil { + return fmt.Errorf("running command in chroot env: %w", err) + } + return nil +} + +type None struct{} + +func (n None) NewRoot() (newRoot string, err error) { + return "/", nil +} + +func (n None) ExecRunCommand(cmd *exec.Cmd) error { + if err := cmd.Start(); err != nil { + return errors.Wrap(err, "starting command") + } + + pgid, err := syscall.Getpgid(cmd.Process.Pid) + if err != nil { + return errors.Wrap(err, "getting group id for process") + }
+ if err := cmd.Wait(); err != nil { + return errors.Wrap(err, "waiting for process to exit") + } + + //it's not an error if there are no grandchildren + if err := syscall.Kill(-pgid, syscall.SIGKILL); err != nil && err.Error() != "no such process" { + return err + } + return nil +} diff --git a/pkg/snapshot/snapshot.go b/pkg/snapshot/snapshot.go index 9ed0697a33..f1747a0b81 100644 --- a/pkg/snapshot/snapshot.go +++ b/pkg/snapshot/snapshot.go @@ -74,7 +74,7 @@ func (s *Snapshotter) TakeSnapshot(files []string, shdCheckDelete bool, forceBui return "", nil } - filesToAdd, err := filesystem.ResolvePaths(files, s.ignorelist) + filesToAdd, err := filesystem.ResolvePaths(s.directory, files, s.ignorelist) if err != nil { return "", err } @@ -82,7 +82,7 @@ func (s *Snapshotter) TakeSnapshot(files []string, shdCheckDelete bool, forceBui logrus.Info("Taking snapshot of files...") sort.Strings(filesToAdd) - logrus.Debugf("Adding to layer: %v", filesToAdd) + logrus.Tracef("Adding to layer: %v", filesToAdd) // Add files to current layer. 
for _, file := range filesToAdd { @@ -110,9 +110,9 @@ func (s *Snapshotter) TakeSnapshot(files []string, shdCheckDelete bool, forceBui sort.Strings(filesToWhiteout) } - t := util.NewTar(f) + t := util.NewTar(s.directory, f) defer t.Close() - if err := writeToTar(t, filesToAdd, filesToWhiteout); err != nil { + if err := writeToTar(s.directory, t, filesToAdd, filesToWhiteout); err != nil { return "", err } return f.Name(), nil @@ -126,7 +126,7 @@ func (s *Snapshotter) TakeSnapshotFS() (string, error) { return "", err } defer f.Close() - t := util.NewTar(f) + t := util.NewTar(s.directory, f) defer t.Close() filesToAdd, filesToWhiteOut, err := s.scanFullFilesystem() @@ -134,7 +134,7 @@ func (s *Snapshotter) TakeSnapshotFS() (string, error) { return "", err } - if err := writeToTar(t, filesToAdd, filesToWhiteOut); err != nil { + if err := writeToTar(s.directory, t, filesToAdd, filesToWhiteOut); err != nil { return "", err } return f.Name(), nil @@ -158,13 +158,13 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) { s.l.Snapshot() - logrus.Debugf("Current image filesystem: %v", s.l.currentImage) + logrus.Tracef("Current image filesystem: %v", s.l.currentImage) changedPaths, deletedPaths := util.WalkFS(s.directory, s.l.GetCurrentPaths(), s.l.CheckFileChange) timer := timing.Start("Resolving Paths") filesToAdd := []string{} - resolvedFiles, err := filesystem.ResolvePaths(changedPaths, s.ignorelist) + resolvedFiles, err := filesystem.ResolvePaths(s.directory, changedPaths, s.ignorelist) if err != nil { return nil, nil, err } @@ -176,8 +176,9 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) { filesToAdd = append(filesToAdd, path) } - logrus.Debugf("Adding to layer: %v", filesToAdd) - logrus.Debugf("Deleting in layer: %v", deletedPaths) + // can spam the whole log + logrus.Tracef("Adding to layer: %v", filesToAdd) + logrus.Tracef("Deleting in layer: %v", deletedPaths) // Add files to the layered map for _, file := range 
filesToAdd { @@ -215,7 +216,7 @@ func removeObsoleteWhiteouts(deletedFiles map[string]struct{}) (filesToWhiteout return filesToWhiteout } -func writeToTar(t util.Tar, files, whiteouts []string) error { +func writeToTar(rootDir string, t util.Tar, files, whiteouts []string) error { timer := timing.Start("Writing tar file") defer timing.DefaultRun.Stop(timer) @@ -223,7 +224,7 @@ func writeToTar(t util.Tar, files, whiteouts []string) error { addedPaths := make(map[string]bool) for _, path := range whiteouts { - if err := addParentDirectories(t, addedPaths, path); err != nil { + if err := addParentDirectories(rootDir, t, addedPaths, path); err != nil { return err } if err := t.Whiteout(path); err != nil { @@ -232,7 +233,7 @@ func writeToTar(t util.Tar, files, whiteouts []string) error { } for _, path := range files { - if err := addParentDirectories(t, addedPaths, path); err != nil { + if err := addParentDirectories(rootDir, t, addedPaths, path); err != nil { return err } if _, pathAdded := addedPaths[path]; pathAdded { @@ -246,8 +247,8 @@ func writeToTar(t util.Tar, files, whiteouts []string) error { return nil } -func addParentDirectories(t util.Tar, addedPaths map[string]bool, path string) error { - for _, parentPath := range util.ParentDirectories(path) { +func addParentDirectories(rootDir string, t util.Tar, addedPaths map[string]bool, path string) error { + for _, parentPath := range util.ParentDirectories(rootDir, path) { if _, pathAdded := addedPaths[parentPath]; pathAdded { continue } diff --git a/pkg/snapshot/snapshot_test.go b/pkg/snapshot/snapshot_test.go index 2dc6055928..5404d032f2 100644 --- a/pkg/snapshot/snapshot_test.go +++ b/pkg/snapshot/snapshot_test.go @@ -33,7 +33,6 @@ import ( func TestSnapshotFSFileChange(t *testing.T) { testDir, snapshotter, cleanup, err := setUpTest(t) - testDirWithoutLeadingSlash := strings.TrimLeft(testDir, "/") defer cleanup() if err != nil { t.Fatal(err) @@ -58,13 +57,11 @@ func TestSnapshotFSFileChange(t *testing.T) { } 
// Check contents of the snapshot, make sure contents is equivalent to snapshotFiles tr := tar.NewReader(f) - fooPath := filepath.Join(testDirWithoutLeadingSlash, "foo") - batPath := filepath.Join(testDirWithoutLeadingSlash, "bar/bat") snapshotFiles := map[string]string{ - fooPath: "newbaz1", - batPath: "baz", + "foo": "newbaz1", + "bar/bat": "baz", } - for _, path := range util.ParentDirectoriesWithoutLeadingSlash(batPath) { + for _, path := range util.ParentDirectoriesWithoutLeadingSlash("bar/bat") { if path == "/" { snapshotFiles["/"] = "" continue @@ -129,14 +126,13 @@ func TestSnapshotFSIsReproducible(t *testing.T) { func TestSnapshotFSChangePermissions(t *testing.T) { testDir, snapshotter, cleanup, err := setUpTest(t) - testDirWithoutLeadingSlash := strings.TrimLeft(testDir, "/") defer cleanup() if err != nil { t.Fatal(err) } // Change permissions on a file - batPath := filepath.Join(testDir, "bar/bat") - batPathWithoutLeadingSlash := filepath.Join(testDirWithoutLeadingSlash, "bar/bat") + batBaseName := "bar/bat" + batPath := filepath.Join(testDir, batBaseName) if err := os.Chmod(batPath, 0600); err != nil { t.Fatalf("Error changing permissions on %s: %v", batPath, err) } @@ -152,9 +148,9 @@ func TestSnapshotFSChangePermissions(t *testing.T) { // Check contents of the snapshot, make sure contents is equivalent to snapshotFiles tr := tar.NewReader(f) snapshotFiles := map[string]string{ - batPathWithoutLeadingSlash: "baz2", + batBaseName: "baz2", } - for _, path := range util.ParentDirectoriesWithoutLeadingSlash(batPathWithoutLeadingSlash) { + for _, path := range util.ParentDirectoriesWithoutLeadingSlash(batBaseName) { if path == "/" { snapshotFiles["/"] = "" continue @@ -190,7 +186,6 @@ func TestSnapshotFSChangePermissions(t *testing.T) { func TestSnapshotFiles(t *testing.T) { testDir, snapshotter, cleanup, err := setUpTest(t) - testDirWithoutLeadingSlash := strings.TrimLeft(testDir, "/") defer cleanup() if err != nil { t.Fatal(err) @@ -212,9 +207,9 @@ func 
TestSnapshotFiles(t *testing.T) { defer os.Remove(tarPath) expectedFiles := []string{ - filepath.Join(testDirWithoutLeadingSlash, "foo"), + "foo", } - for _, path := range util.ParentDirectoriesWithoutLeadingSlash(filepath.Join(testDir, "foo")) { + for _, path := range util.ParentDirectoriesWithoutLeadingSlash("foo") { expectedFiles = append(expectedFiles, strings.TrimRight(path, "/")+"/") } @@ -446,20 +441,13 @@ func TestSnapshotIncludesParentDirBeforeWhiteoutFile(t *testing.T) { t.Fatal(err) } - testDirWithoutLeadingSlash := strings.TrimLeft(testDir, "/") expectedFiles := []string{ - filepath.Join(testDirWithoutLeadingSlash, "kaniko/.wh.file"), - filepath.Join(testDirWithoutLeadingSlash, "kaniko/new-file"), - filepath.Join(testDirWithoutLeadingSlash, ".wh.bar"), "/", + ".wh.bar", + "kaniko/", + "kaniko/.wh.file", + "kaniko/new-file", } - for parentDir := filepath.Dir(expectedFiles[0]); parentDir != "."; parentDir = filepath.Dir(parentDir) { - expectedFiles = append(expectedFiles, parentDir+"/") - } - - // Sorting does the right thing in this case. The expected order for a directory is: - // Parent dirs first, then whiteout files in the directory, then other files in that directory - sort.Strings(expectedFiles) testutil.CheckErrorAndDeepEqual(t, false, nil, expectedFiles, actualFiles) } diff --git a/pkg/unshare/doc.go b/pkg/unshare/doc.go new file mode 100644 index 0000000000..6c9e3a289e --- /dev/null +++ b/pkg/unshare/doc.go @@ -0,0 +1,4 @@ +package unshare + +// This implementation of unshare is based on the package "unshare" from containers/storage. +// The main difference is, that this one does not use cgo. 
diff --git a/pkg/unshare/unshare_linux.go b/pkg/unshare/unshare_linux.go new file mode 100644 index 0000000000..b2f9bb3437 --- /dev/null +++ b/pkg/unshare/unshare_linux.go @@ -0,0 +1,280 @@ +//go:build linux +// +build linux + +package unshare + +import ( + "fmt" + "io" + "os" + "os/exec" + "runtime" + "strconv" + "syscall" + + "github.com/GoogleContainerTools/kaniko/pkg/idtools" + "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/docker/docker/pkg/reexec" + "github.com/sirupsen/logrus" +) + +const ( + unshareReexecKey = "unshare-reexec" + continuePipeKey = "_kaniko_continue_pipe" + pidPipeKey = "_kaniko_pid_pipe" + // insideUnshareCommandKey will be set to every command that is run by unshare.Command. + // this will make sure, that childWait() will be executed in child. + insideUnshareCommandKey = "_kaniko_unshare_command" +) + +type Cmd struct { + *exec.Cmd +} + +// Command will create a new Cmd with a reexec.Command set to args. +// +// Also set SysProcAttr.UnshareFlags to unshareFlags. +// Use 0 if you don't want to create any namespaces. +// +// Make sure that reexec.Init() will be called in your program. +func Command(unshareFlags int, args ...string) *Cmd { + c := &Cmd{ + Cmd: reexec.Command(args...), + } + // SysProcAttr will always be created from reexec.Command() + // so don't worry about nil pointer + c.SysProcAttr.Unshareflags = uintptr(unshareFlags) + return c +} + +func (c *Cmd) Run() error { + if err := c.Start(); err != nil { + return err + } + return c.Wait() +} + +func (c *Cmd) Start() error { + // Create a pipe for getting the pid of child. 
+ // Use this method instead of checking in the parent, because we wouldn't + // know when the child is ready + pidReader, pidWriter, err := os.Pipe() + if err != nil { + return fmt.Errorf("error creating pid pipes: %w", err) + } + defer pidReader.Close() + defer func() { + if pidWriter != nil { + pidWriter.Close() + } + }() + + // Create a pipe signaling the child to continue + // Child will wait until something is sent over this pipe + continueReader, continueWriter, err := os.Pipe() + if err != nil { + return fmt.Errorf("error creating pid pipes: %w", err) + } + defer func() { + if continueReader != nil { + continueReader.Close() + } + }() + defer continueWriter.Close() + + // create env before appending files, because len(ExtraFIles) would be wrong otherwise + c.Env = append(c.Env, fmt.Sprintf("%s=%d", pidPipeKey, len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, pidWriter) + c.Env = append(c.Env, fmt.Sprintf("%s=%d", continuePipeKey, len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, continueReader) + + // set insideUnshareCommandKey to signal child that it needs to execute childWait() + c.Env = append(c.Env, fmt.Sprintf("%s=%d", insideUnshareCommandKey, 1)) + + // Start the new process. + err = c.Cmd.Start() + if err != nil { + return err + } + + // Close the ends of the pipes that the parent doesn't need. 
+ continueReader.Close() + continueReader = nil + pidWriter.Close() + pidWriter = nil + + pidbuf := make([]byte, 8) + + n, err := pidReader.Read(pidbuf) + if err != nil { + err = fmt.Errorf("reading pid from child pipe: %w", err) + fmt.Fprint(continueWriter, err) + return err + } + + pid, err := strconv.Atoi(string(pidbuf[:n])) + if err != nil { + err = fmt.Errorf("converting pid from child to integer: %w", err) + fmt.Fprint(continueWriter, err) + return err + } + + // only create additional mappings if creating user namespace + if c.SysProcAttr.Unshareflags&syscall.CLONE_NEWUSER != 0 { + uid := os.Getuid() + gid := os.Getgid() + + u, err := util.LookupUser("/", fmt.Sprint(uid)) + if err != nil { + err = fmt.Errorf("lookup user for %v: %w", uid, err) + fmt.Fprint(continueWriter, err) + return err + } + + group, err := util.LookupGroup("/", fmt.Sprint(gid)) + if err != nil { + err = fmt.Errorf("lookup group for %v: %w", gid, err) + fmt.Fprint(continueWriter, err) + return err + } + + uidmap, gidmap, err := idtools.GetSubIDMappings(uint32(uid), uint32(gid), u.Username, group.Name) + if err != nil { + logrus.Warnf("getting subid mappings failed, fall back to single mapping") + } + // Map our UID and GID, then the subuid and subgid ranges, + // consecutively, starting at 0, to get the mappings to use for + // a copy of ourselves. + uidmap = append([]idtools.Mapping{{HostID: uint32(uid), ContainerID: 0, Size: 1}}, uidmap...) + gidmap = append([]idtools.Mapping{{HostID: uint32(gid), ContainerID: 0, Size: 1}}, gidmap...) 
+ + if err = idtools.SetUidMap(pid, uidmap); err != nil { + err = fmt.Errorf("apply subuid mapping: %w", err) + fmt.Fprint(continueWriter, err) + return err + } + + // disable ability of process pid to call setgroups() syscall + if err = writeToSetGroups(pid, "deny"); err != nil { + err = fmt.Errorf("write deny to setgroups: %w", err) + fmt.Fprint(continueWriter, err) + return err + } + + if err = idtools.SetGidMap(pid, gidmap); err != nil { + err = fmt.Errorf("apply subgid mapping: %w", err) + fmt.Fprint(continueWriter, err) + return err + } + } + + // nothing went wrong, so lets continue child + _, err = fmt.Fprint(continueWriter, "continue") + if err != nil { + return fmt.Errorf("writing to child continue pipe: %w", err) + } + return nil +} + +// childWait will be executed before any unshare Command is run. +func childWait() { + runtime.LockOSThread() + // early return if we were not called from unshare.Command() + if os.Getenv(insideUnshareCommandKey) == "" { + // unlock threads if we are not in unshared environment + runtime.UnlockOSThread() + return + } + + pidStr := fmt.Sprint(os.Getpid()) + pidPipe, err := getPipeFromKey(pidPipeKey) + if err != nil { + fmt.Fprintf(os.Stderr, "error getting pid pipe: %v\n", err) + os.Exit(1) + } + defer pidPipe.Close() + + _, err = io.WriteString(pidPipe, pidStr) + if err != nil { + fmt.Fprintf(os.Stderr, "error writing pid to pidpipe: %v\n", err) + os.Exit(1) + } + + err = waitForContinue() + if err != nil { + fmt.Fprintf(os.Stderr, "error writing pid to pidpipe: %v\n", err) + os.Exit(1) + } +} + +// writeSetGroup will write val to /proc/PID/setgroups +// +// Since Linux 3.19 unprivileged writing of /proc/self/gid_map +// has been disabled unless /proc/self/setgroups is written +// first to permanently disable the ability to call setgroups +// in that user namespace. 
+func writeToSetGroups(pid int, val string) error { + path := fmt.Sprintf("/proc/%d/setgroups", pid) + f, err := os.OpenFile(path, os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + return err + } + defer f.Close() + _, err = f.Write([]byte(val)) + if err != nil { + return fmt.Errorf("writing %v to %v: %w", val, path, err) + } + return nil +} + +// write to proc map file will write 0 {UID,GID} 1 to /proc/pid/{uid,gid}_map +// fileType is uid_map or gid_map +func writeToProcMapFile(pid int, uidOrGid uint32, fileType string) error { + path := fmt.Sprintf("/proc/%d/%s", pid, fileType) + f, err := os.OpenFile(path, os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + return err + } + defer f.Close() + + mapStr := fmt.Sprintf("0 %d 1", uidOrGid) + _, err = f.Write([]byte(mapStr)) + if err != nil { + return fmt.Errorf("writing to %v: %w", path, err) + } + return nil +} + +// waitForContinue will block until we read something from the continue pipe. +// This pipe will be used by the parent if it errors or child can continue execution +func waitForContinue() error { + continuePipe, err := getPipeFromKey(continuePipeKey) + if err != nil { + return fmt.Errorf("creating continue pipe: %w", err) + } + defer continuePipe.Close() + buf := make([]byte, 1024) + // use read instead of readall because pipe wont send EOF + _, err = continuePipe.Read(buf) + if err != nil { + return fmt.Errorf("reading from continue pipe: %w", err) + } + return nil +} + +func getPipeFromKey(key string) (*os.File, error) { + fdStr := os.Getenv(key) + if fdStr == "" { + return nil, fmt.Errorf("%v is not set, can't create pipe", key) + } + fd, err := strconv.Atoi(fdStr) + if err != nil { + return nil, fmt.Errorf("converting %v to integer: %w", fdStr, err) + } + return os.NewFile(uintptr(fd), key), nil +} + +func init() { + // childWait will always be executed on startup + childWait() +} diff --git a/pkg/unshare/unshare_linux_test.go b/pkg/unshare/unshare_linux_test.go new file mode 100644 index 0000000000..405ca190f8
--- /dev/null +++ b/pkg/unshare/unshare_linux_test.go @@ -0,0 +1,195 @@ +//go:build linux +// +build linux + +package unshare + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "strconv" + "syscall" + "testing" + + "github.com/GoogleContainerTools/kaniko/pkg/idtools" + "github.com/GoogleContainerTools/kaniko/testutil" + "github.com/docker/docker/pkg/reexec" +) + +const ( + reportReexecKey = "report-reexec" +) + +func TestMain(m *testing.M) { + if reexec.Init() { + return + } + os.Exit(m.Run()) +} + +func init() { + reexec.Register(reportReexecKey, reportMain) +} + +func TestUnshareNamespaces(t *testing.T) { + for name, flag := range namespaces { + // always create user namespace because we might not be running as root + c := Command(syscall.CLONE_NEWUSER|flag, reportReexecKey) + buf := new(bytes.Buffer) + c.Stderr, c.Stdout = buf, buf + + t.Run(name, func(t *testing.T) { + err := c.Run() + if err != nil { + t.Fatalf("run %q: %v: %s", name, err, buf.String()) + } + // our namespace links + ns, err := getNamespaceLinks() + if err != nil { + t.Fatalf("getting namespace links: %v", err) + } + report := getReport(t, buf.Bytes()) + if report.Namespaces[name] == ns[name] { + t.Errorf("unshare didn't create a new %v namespace", name) + } + }) + } +} + +func TestUnshareIDMappings(t *testing.T) { + tests := []struct { + name string + unshareFlags int + want report + }{ + { + name: "no new namespace", + want: func() report { + var r report + var err error + r.Uidmap, r.Gidmap, err = idtools.GetHostIDMappings("") + if err != nil { + t.Fatalf("getting hostid mappings: %v", err) + } + r.Uid = uint32(os.Getuid()) + return r + }(), + }, + { + name: "user namespace", + unshareFlags: syscall.CLONE_NEWUSER, + want: func() report { + var r report + r.Uidmap, r.Gidmap = expectedMappings(t) + // when using user namespace we want to be root inside there + r.Uid = 0 + return r + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := 
Command(tt.unshareFlags, reportReexecKey) + buf := new(bytes.Buffer) + c.Stderr, c.Stdout = buf, buf + + err := c.Run() + if err != nil { + t.Fatalf("run %q: %v: %s", tt.name, err, buf.String()) + } + report := getReport(t, buf.Bytes()) + testutil.CheckDeepEqual(t, tt.want.Gidmap, report.Gidmap) + testutil.CheckDeepEqual(t, tt.want.Uidmap, report.Uidmap) + testutil.CheckDeepEqual(t, tt.want.Uid, report.Uid) + }) + } +} + +func getReport(t *testing.T, data []byte) report { + var report report + if err := json.Unmarshal(data, &report); err != nil { + t.Fatalf("error parsing results: %v", err) + } + return report +} + +// check which namespaces we are in +func getNamespaceLinks() (map[string]string, error) { + found := map[string]string{} + for name := range namespaces { + linkTarget, err := os.Readlink("/proc/self/ns/" + name) + if err != nil { + return nil, fmt.Errorf("reading link /proc/self/ns/%s: %w", name, err) + } + found[name] = linkTarget + } + return found, nil +} + +func expectedMappings(t *testing.T) ([]idtools.Mapping, []idtools.Mapping) { + u := testutil.GetCurrentUser(t) + uid, err := strconv.Atoi(u.Uid) + if err != nil { + t.Errorf("converting uid to int: %v", err) + } + gid, err := strconv.Atoi(u.Gid) + if err != nil { + t.Errorf("converting gid to int: %v", err) + } + + uidmap, gidmap, err := idtools.GetSubIDMappings(uint32(uid), uint32(gid), u.Username, u.PrimaryGroup) + if err != nil { + t.Errorf("getting subuid mappings: %v", err) + } + uidmap = append([]idtools.Mapping{{HostID: uint32(uid), ContainerID: 0, Size: 1}}, uidmap...) + gidmap = append([]idtools.Mapping{{HostID: uint32(gid), ContainerID: 0, Size: 1}}, gidmap...) 
+ + return uidmap, gidmap +} + +type report struct { + Uidmap []idtools.Mapping + Gidmap []idtools.Mapping + Uid uint32 + Namespaces map[string]string +} + +var ( + namespaces = map[string]int{ + "ipc": syscall.CLONE_NEWIPC, + "net": syscall.CLONE_NEWNET, + "mnt": syscall.CLONE_NEWNS, + "user": syscall.CLONE_NEWUSER, + "uts": syscall.CLONE_NEWUTS, + } +) + +// reportMain will collect information about the unshared environment +// and write a report into a pipe for later use. +func reportMain() { + uidmap, gidmap, err := idtools.GetHostIDMappings("") + if err != nil { + fmt.Printf("error getting hostIDMappings: %v", err) + os.Exit(1) + } + + ns, err := getNamespaceLinks() + if err != nil { + fmt.Printf("error getting namespace links: %v", err) + os.Exit(1) + } + + r := report{ + Uidmap: uidmap, + Gidmap: gidmap, + Uid: uint32(os.Getuid()), + Namespaces: ns, + } + + err = json.NewEncoder(os.Stdout).Encode(r) + if err != nil { + fmt.Printf("error writing reportData to pipe: %v", err) + os.Exit(1) + } +} diff --git a/pkg/util/command_util.go b/pkg/util/command_util.go index 728f89dc4f..f431e8a616 100644 --- a/pkg/util/command_util.go +++ b/pkg/util/command_util.go @@ -33,7 +33,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/GoogleContainerTools/kaniko/pkg/config" + chrootuser "github.com/GoogleContainerTools/kaniko/pkg/chroot/user" ) // for testing @@ -127,7 +127,7 @@ func ResolveSources(srcs []string, root string) ([]string, error) { if err != nil { return nil, errors.Wrap(err, "resolving sources") } - resolved, err := matchSources(srcs, files) + resolved, err := matchSources(root, srcs, files) if err != nil { return nil, errors.Wrap(err, "matching sources") } @@ -136,7 +136,7 @@ func ResolveSources(srcs []string, root string) ([]string, error) { } // matchSources returns a list of sources that match wildcards -func matchSources(srcs, files []string) ([]string, error) { +func matchSources(rootDir string, srcs, files []string) 
([]string, error) { var matchedSources []string for _, src := range srcs { if IsSrcRemoteFileURL(src) { @@ -146,7 +146,7 @@ func matchSources(srcs, files []string) ([]string, error) { src = filepath.Clean(src) for _, file := range files { if filepath.IsAbs(src) { - file = filepath.Join(config.RootDir, file) + file = filepath.Join(rootDir, file) } matched, err := filepath.Match(src, file) if err != nil { @@ -234,7 +234,9 @@ func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, resolvedSources []stri totalSrcs++ } if totalSrcs > 1 && !IsDestDir(dest) { - return errors.New("when specifying multiple sources in a COPY command, destination must be a directory and end in '/'") + return errors.New( + "when specifying multiple sources in a COPY command, destination must be a directory and end in '/'", + ) } } @@ -342,7 +344,7 @@ Loop: return nil } -func GetUserGroup(chownStr string, env []string) (int64, int64, error) { +func GetUserGroup(rootDir string, chownStr string, env []string) (int64, int64, error) { if chownStr == "" { return DoNotChangeUID, DoNotChangeGID, nil } @@ -352,7 +354,7 @@ func GetUserGroup(chownStr string, env []string) (int64, int64, error) { return -1, -1, err } - uid32, gid32, err := getUIDAndGIDFromString(chown, true) + uid32, gid32, err := getUIDAndGIDFromString(rootDir, chown, true) if err != nil { return -1, -1, err } @@ -364,38 +366,39 @@ func GetUserGroup(chownStr string, env []string) (int64, int64, error) { // If fallbackToUID is set, the gid is equal to uid if the group is not specified // otherwise gid is set to zero. // UserID and GroupID don't need to be present on the system. 
-func getUIDAndGIDFromString(userGroupString string, fallbackToUID bool) (uint32, uint32, error) { +func getUIDAndGIDFromString(rootDir string, userGroupString string, fallbackToUID bool) (uint32, uint32, error) { userAndGroup := strings.Split(userGroupString, ":") userStr := userAndGroup[0] var groupStr string if len(userAndGroup) > 1 { groupStr = userAndGroup[1] } - return getUIDAndGIDFunc(userStr, groupStr, fallbackToUID) + return getUIDAndGIDFunc(rootDir, userStr, groupStr, fallbackToUID) } -func getUIDAndGID(userStr string, groupStr string, fallbackToUID bool) (uint32, uint32, error) { - user, err := LookupUser(userStr) +func getUIDAndGID(rootDir string, userStr string, groupStr string, fallbackToUID bool) (uint32, uint32, error) { + user, err := LookupUser(rootDir, userStr) if err != nil { return 0, 0, err } - uid32, err := getUID(user.Uid) + uid32, err := parseUID(user.Uid) if err != nil { return 0, 0, err } - gid, err := getGIDFromName(groupStr, fallbackToUID) + gid, err := getGIDFromName(rootDir, groupStr, fallbackToUID) if err != nil { - if errors.Is(err, fallbackToUIDError) { + if errors.Is(err, fallbackErr) { return uid32, uid32, nil } + return 0, 0, err } return uid32, gid, nil } -// getGID tries to parse the gid or falls back to getGroupFromName if it's not an id -func getGID(groupStr string, fallbackToUID bool) (uint32, error) { +// parseGID tries to parse the gid or falls back to getGroupFromName if it's not an id +func parseGID(groupStr string, fallbackToUID bool) (uint32, error) { gid, err := strconv.ParseUint(groupStr, 10, 32) if err != nil { return 0, fallbackToUIDOrError(err, fallbackToUID) @@ -405,66 +408,97 @@ func getGID(groupStr string, fallbackToUID bool) (uint32, error) { // getGIDFromName tries to parse the groupStr into an existing group. // if the group doesn't exist, fallback to getGID to parse non-existing valid GIDs. 
-func getGIDFromName(groupStr string, fallbackToUID bool) (uint32, error) { - group, err := user.LookupGroup(groupStr) +func getGIDFromName(rootDir string, groupStr string, fallbackToUID bool) (uint32, error) { + group, err := LookupGroup(rootDir, groupStr) if err != nil { - // unknown group error could relate to a non existing group - var groupErr *user.UnknownGroupError - if errors.Is(err, groupErr) { - return getGID(groupStr, fallbackToUID) - } - group, err = user.LookupGroupId(groupStr) + // at this point user.LookupGroup and user.LookupGroupId failed, so the group doesn't exist + // try to raw parse the groupStr as a gid + return parseGID(groupStr, fallbackToUID) + } + return parseGID(group.Gid, fallbackToUID) +} + +// LookupGroup will use chrootuser.GetGroup function to get the group +// This is done to get the group struct in environments, where the group file is not at /etc/group +func LookupGroup(rootDir string, group string) (*user.Group, error) { + if rootDir != "/" { + logrus.Debugf("detected chroot environment, getting group %v via chrootuser pkg", group) + group, err := chrootuser.GetGroup(rootDir, group) if err != nil { - return getGID(groupStr, fallbackToUID) + return nil, fmt.Errorf("getting group %v: %w", group, err) } + return group, nil } - return getGID(group.Gid, fallbackToUID) + grp, err := user.LookupGroup(group) + if err != nil { + var unknownGroupErr user.UnknownGroupError + // if error is of type unknownGroupError, try to look up the group with Id + if errors.As(err, &unknownGroupErr) { + grp, err = user.LookupGroupId(group) + } + } + return grp, err } -var fallbackToUIDError = new(fallbackToUIDErrorType) +var fallbackErr = fallbackToUIDError{} -type fallbackToUIDErrorType struct{} +type fallbackToUIDError struct{} -func (e fallbackToUIDErrorType) Error() string { +func (e fallbackToUIDError) Error() string { return "fallback to uid" } func fallbackToUIDOrError(err error, fallbackToUID bool) error { if fallbackToUID { - return
fallbackToUIDError + return fallbackErr } return err } // LookupUser will try to lookup the userStr inside the passwd file. // If the user does not exists, the function will fallback to parsing the userStr as an uid. -func LookupUser(userStr string) (*user.User, error) { - userObj, err := user.Lookup(userStr) +func LookupUser(rootDir, userStr string) (*user.User, error) { + userObj, err := lookupUser(rootDir, userStr) if err != nil { - unknownUserErr := new(user.UnknownUserError) - // only return if it's not an unknown user error - if !errors.As(err, unknownUserErr) { - return nil, err + // at this point, user.LookupUser and user.LookupId failed, so try to parse userStr as a raw uid + uid, err := parseUID(userStr) + if err != nil { + // at this point, the user does not exist and the userStr is not a valid number. + return nil, fmt.Errorf("user %v is not a uid and does not exist on the system", userStr) } + return &user.User{ + Uid: fmt.Sprint(uid), + HomeDir: "/", + }, nil + } + return userObj, nil +} - // Lookup by id - userObj, err = user.LookupId(userStr) +// lookupUser will lookup userStr as username or as uid if username lookup fails. +// In situations where rootdir != / use chrootuser.GetUser function to get the current user. +// This is done to get the user struct in environments, where the passwd file is not at /etc/passwd. +func lookupUser(rootDir string, userStr string) (*user.User, error) { + if rootDir != "/" { + logrus.Debugf("detected chroot environment, getting user %v via chrootuser pkg", userStr) + user, err := chrootuser.GetUser(rootDir, userStr) if err != nil { - uid, err := getUID(userStr) - if err != nil { - // at this point, the user does not exist and the userStr is not a valid number. 
- return nil, fmt.Errorf("user %v is not a uid and does not exist on the system", userStr) - } - userObj = &user.User{ - Uid: fmt.Sprint(uid), - HomeDir: "/", - } + return nil, err } + return user, nil } - return userObj, nil + u, err := user.Lookup(userStr) + if err != nil { + var unknownUserErr user.UnknownUserError + // if error is of type unknownUserError, try to look up the user with Id + if errors.As(err, &unknownUserErr) { + logrus.Debugf("user lookup with unknownUserError, try lookup %v as uid", userStr) + u, err = user.LookupId(userStr) + } + } + return u, err } -func getUID(userStr string) (uint32, error) { +func parseUID(userStr string) (uint32, error) { // checkif userStr is a valid id uid, err := strconv.ParseUint(userStr, 10, 32) if err != nil { diff --git a/pkg/util/command_util_test.go b/pkg/util/command_util_test.go index 483fbfff84..34884f9099 100644 --- a/pkg/util/command_util_test.go +++ b/pkg/util/command_util_test.go @@ -278,7 +278,7 @@ var matchSourcesTests = []struct { func Test_MatchSources(t *testing.T) { for _, test := range matchSourcesTests { - actualFiles, err := matchSources(test.srcs, test.files) + actualFiles, err := matchSources("/", test.srcs, test.files) sort.Strings(actualFiles) sort.Strings(test.expectedFiles) testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedFiles, actualFiles) @@ -558,7 +558,7 @@ func TestGetUserGroup(t *testing.T) { description string chown string env []string - mockIDGetter func(userStr string, groupStr string, fallbackToUID bool) (uint32, uint32, error) + mockIDGetter func(rootDir string, userStr string, groupStr string, fallbackToUID bool) (uint32, uint32, error) // needed, in case uid is a valid number, but group is a name mockGroupIDGetter func(groupStr string) (*user.Group, error) expectedU int64 @@ -569,7 +569,7 @@ func TestGetUserGroup(t *testing.T) { description: "non empty chown", chown: "some:some", env: []string{}, - mockIDGetter: func(string, string, bool) (uint32, uint32, error) { + 
mockIDGetter: func(string, string, string, bool) (uint32, uint32, error) { return 100, 1000, nil }, expectedU: 100, @@ -579,7 +579,7 @@ func TestGetUserGroup(t *testing.T) { description: "non empty chown with env replacement", chown: "some:$foo", env: []string{"foo=key"}, - mockIDGetter: func(userStr string, groupStr string, fallbackToUID bool) (uint32, uint32, error) { + mockIDGetter: func(rootDir string, userStr string, groupStr string, fallbackToUID bool) (uint32, uint32, error) { if userStr == "some" && groupStr == "key" { return 10, 100, nil } @@ -590,7 +590,7 @@ func TestGetUserGroup(t *testing.T) { }, { description: "empty chown string", - mockIDGetter: func(string, string, bool) (uint32, uint32, error) { + mockIDGetter: func(string, string, string, bool) (uint32, uint32, error) { return 0, 0, fmt.Errorf("should not be called") }, expectedU: -1, @@ -604,7 +604,7 @@ func TestGetUserGroup(t *testing.T) { getUIDAndGIDFunc = originalIDGetter }() getUIDAndGIDFunc = tc.mockIDGetter - uid, gid, err := GetUserGroup(tc.chown, tc.env) + uid, gid, err := GetUserGroup("/", tc.chown, tc.env) testutil.CheckErrorAndDeepEqual(t, tc.shdErr, err, uid, tc.expectedU) testutil.CheckErrorAndDeepEqual(t, tc.shdErr, err, gid, tc.expectedG) }) @@ -796,7 +796,7 @@ func Test_GetUIDAndGIDFromString(t *testing.T) { }, } for _, tt := range testCases { - uid, gid, err := getUIDAndGIDFromString(tt.args.userGroupStr, tt.args.fallbackToUID) + uid, gid, err := getUIDAndGIDFromString("/", tt.args.userGroupStr, tt.args.fallbackToUID) testutil.CheckError(t, tt.wantErr, err) if uid != tt.expected.userID || gid != tt.expected.groupID { t.Errorf("%v failed. Could not correctly decode %s to uid/gid %d:%d. 
Result: %d:%d", @@ -849,7 +849,7 @@ func TestLookupUser(t *testing.T) { } for _, tt := range tests { t.Run(tt.testname, func(t *testing.T) { - got, err := LookupUser(tt.args.userStr) + got, err := LookupUser("/", tt.args.userStr) testutil.CheckErrorAndDeepEqual(t, tt.wantErr, err, tt.expected, got) }) } diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index d29fb43b46..1a146b004a 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -95,6 +95,9 @@ type FSConfig struct { type FSOpt func(*FSConfig) +// InitIgnoreList is global for testing purposes +var InitIgnoreList = InitIgnoreListFunc + func IgnoreList() []IgnoreListEntry { return ignorelist } @@ -216,9 +219,9 @@ func GetFSFromLayers(root string, layers []v1.Layer, opts ...FSOpt) ([]string, e } // DeleteFilesystem deletes the extracted image file system -func DeleteFilesystem() error { +func DeleteFilesystem(rootDir string) error { logrus.Info("Deleting filesystem...") - return filepath.Walk(config.RootDir, func(path string, info os.FileInfo, err error) error { + return filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error { if err != nil { // ignore errors when deleting. 
return nil @@ -239,7 +242,7 @@ func DeleteFilesystem() error { logrus.Debugf("Not deleting %s, as it contains a ignored path", path) return nil } - if path == config.RootDir { + if path == rootDir { return nil } return os.RemoveAll(path) @@ -297,7 +300,8 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error { return err } - if CheckIgnoreList(abs) && !checkIgnoreListRoot(dest) { + // TODO: check if removing the rootDir check is working + if CheckIgnoreList(abs) && !CheckIgnoreList(dest) { logrus.Debugf("Not adding %s because it is ignored", path) return nil } @@ -426,13 +430,6 @@ func CheckIgnoreList(path string) bool { return CheckProvidedIgnoreList(path, ignorelist) } -func checkIgnoreListRoot(root string) bool { - if root == config.RootDir { - return false - } - return CheckIgnoreList(root) -} - // Get ignorelist from roots of mounted files // Each line of /proc/self/mountinfo is in the form: // 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue @@ -461,7 +458,7 @@ func DetectFilesystemIgnoreList(path string) error { } continue } - if lineArr[4] != config.RootDir { + if lineArr[4] != "/" { logrus.Tracef("Adding ignore list entry %s from line: %s", lineArr[4], line) ignorelist = append(ignorelist, IgnoreListEntry{ Path: lineArr[4], @@ -500,11 +497,11 @@ func RelativeFiles(fp string, root string) ([]string, error) { // ParentDirectories returns a list of paths to all parent directories // Ex. /some/temp/dir -> [/, /some, /some/temp, /some/temp/dir] -func ParentDirectories(path string) []string { +func ParentDirectories(rootDir string, path string) []string { dir := filepath.Clean(path) var paths []string for { - if dir == filepath.Clean(config.RootDir) || dir == "" || dir == "." { + if dir == filepath.Clean(rootDir) || dir == "" || dir == "." { break } dir, _ = filepath.Split(dir) @@ -512,7 +509,7 @@ func ParentDirectories(path string) []string { paths = append([]string{dir}, paths...) 
} if len(paths) == 0 { - paths = []string{config.RootDir} + paths = []string{rootDir} } return paths } @@ -524,7 +521,7 @@ func ParentDirectoriesWithoutLeadingSlash(path string) []string { path = filepath.Clean(path) dirs := strings.Split(path, "/") dirPath := "" - paths := []string{config.RootDir} + paths := []string{"/"} for index, dir := range dirs { if dir == "" || index == (len(dirs)-1) { continue @@ -1002,7 +999,7 @@ func CopyOwnership(src string, destDir string, root string) error { func createParentDirectory(path string) error { baseDir := filepath.Dir(path) - if info, err := os.Lstat(baseDir); os.IsNotExist(err) { + if info, err := os.Lstat(baseDir); errors.Is(err, os.ErrNotExist) { logrus.Tracef("BaseDir %s for file %s does not exist. Creating.", baseDir, path) if err := os.MkdirAll(baseDir, 0755); err != nil { return err @@ -1017,7 +1014,7 @@ func createParentDirectory(path string) error { // InitIgnoreList will initialize the ignore list using: // - defaultIgnoreList // - mounted paths via DetectFilesystemIgnoreList() -func InitIgnoreList(detectFilesystem bool) error { +func InitIgnoreListFunc(detectFilesystem bool) error { logrus.Trace("Initializing ignore list") ignorelist = append([]IgnoreListEntry{}, defaultIgnoreList...) 
@@ -1119,6 +1116,7 @@ func GetFSInfoMap(dir string, existing map[string]os.FileInfo) (map[string]os.Fi godirwalk.Walk(dir, &godirwalk.Options{ Callback: func(path string, ent *godirwalk.Dirent) error { if CheckIgnoreList(path) { + ent.IsDirOrSymlinkToDir() if IsDestDir(path) { logrus.Tracef("Skipping paths under %s, as it is a ignored directory", path) return filepath.SkipDir diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index 226658bd42..91d7e0a611 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -218,10 +218,7 @@ func Test_ParentDirectories(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - original := config.RootDir - defer func() { config.RootDir = original }() - config.RootDir = tt.rootDir - actual := ParentDirectories(tt.path) + actual := ParentDirectories(tt.rootDir, tt.path) testutil.CheckErrorAndDeepEqual(t, false, nil, tt.expected, actual) }) @@ -608,7 +605,7 @@ func createUncompressedTar(fileContents map[string]string, tarFileName, testDir if err != nil { return err } - t := NewTar(tarFile) + t := NewTar(testDir, tarFile) defer t.Close() for file := range fileContents { filePath := filepath.Join(testDir, file) @@ -660,7 +657,8 @@ func Test_UnTar(t *testing.T) { if err != nil { t.Fatal(err) } - fileList, err := UnTar(file, tc.destination) + dest := filepath.Join(testDir, tc.destination) + fileList, err := UnTar(file, dest) if err != nil { t.Fatal(err) } @@ -1092,6 +1090,16 @@ func Test_GetFSFromLayers_with_whiteouts_include_whiteout_enabled(t *testing.T) expectedFiles = append(expectedFiles, secondLayerFiles...) + // override InitIgnoreList func since tempDir could be part of the ignorelist on some systems and fail the test + originalFunc := InitIgnoreList + InitIgnoreList = func(detectFilesystem bool) error { + ignorelist = defaultIgnoreList + return nil + } + defer func() { + InitIgnoreList = originalFunc + }() + actualFiles, err := GetFSFromLayers(root, layers, opts...) 
assertGetFSFromLayers( @@ -1192,6 +1200,15 @@ func Test_GetFSFromLayers_with_whiteouts_include_whiteout_disabled(t *testing.T) mockLayer2, } + // override InitIgnoreList func since tempDir could be part of the ignorelist on some systems and fail the test + originalFunc := InitIgnoreList + InitIgnoreList = func(detectFilesystem bool) error { + ignorelist = defaultIgnoreList + return nil + } + defer func() { + InitIgnoreList = originalFunc + }() actualFiles, err := GetFSFromLayers(root, layers, opts...) assertGetFSFromLayers( @@ -1280,6 +1297,15 @@ func Test_GetFSFromLayers_ignorelist(t *testing.T) { mockLayer, } + // override InitIgnoreList func since tempDir could be part of the ignorelist on some systems and fail the test + originalFunc := InitIgnoreList + InitIgnoreList = func(detectFilesystem bool) error { + ignorelist = defaultIgnoreList + return nil + } + defer func() { + InitIgnoreList = originalFunc + }() actualFiles, err := GetFSFromLayers(root, layers, opts...) assertGetFSFromLayers( t, diff --git a/pkg/util/groupids_cgo.go b/pkg/util/groupids_cgo.go index 73865a638e..2d29933320 100644 --- a/pkg/util/groupids_cgo.go +++ b/pkg/util/groupids_cgo.go @@ -22,13 +22,18 @@ package util import ( "os/user" + + chrootuser "github.com/GoogleContainerTools/kaniko/pkg/chroot/user" ) // groupIDs returns all of the group ID's a user is a member of -func groupIDs(u *user.User) ([]string, error) { +func groupIDs(rootDir string, u *user.User) ([]string, error) { // user can have no gid if it's a non existing user if u.Gid == "" { return []string{}, nil } + if rootDir != "/" { + return chrootuser.GetAdditionalGroupIDs(rootDir, u) + } return u.GroupIds() } diff --git a/pkg/util/groupids_fallback.go b/pkg/util/groupids_fallback.go index e336d72e9f..57169717af 100644 --- a/pkg/util/groupids_fallback.go +++ b/pkg/util/groupids_fallback.go @@ -21,28 +21,15 @@ limitations under the License. 
package util import ( - "bufio" - "bytes" - "io" - "os" "os/user" - "strconv" - "strings" - "github.com/pkg/errors" "github.com/sirupsen/logrus" -) - -var groupFile = "/etc/group" -type group struct { - id string // group ID - name string // group name - members []string // secondary group ids -} + chrootuser "github.com/GoogleContainerTools/kaniko/pkg/chroot/user" +) // groupIDs returns all of the group ID's a user is a member of -func groupIDs(u *user.User) ([]string, error) { +func groupIDs(rootDir string, u *user.User) ([]string, error) { logrus.Infof("Performing slow lookup of group ids for %s", u.Username) // user can have no gid if it's a non existing user @@ -50,49 +37,5 @@ func groupIDs(u *user.User) ([]string, error) { return []string{}, nil } - f, err := os.Open(groupFile) - if err != nil { - return nil, errors.Wrap(err, "open") - } - defer f.Close() - - gids := []string{u.Gid} - - for _, g := range localGroups(f) { - for _, m := range g.members { - if m == u.Username { - gids = append(gids, g.id) - } - } - } - - return gids, nil -} - -// localGroups parses a reader in /etc/group form, returning parsed group data -// based on src/os/user/lookup_unix.go - but extended to include secondary groups -func localGroups(r io.Reader) []*group { - var groups []*group - - bs := bufio.NewScanner(r) - for bs.Scan() { - line := bs.Bytes() - - // There's no spec for /etc/passwd or /etc/group, but we try to follow - // the same rules as the glibc parser, which allows comments and blank - // space at the beginning of a line. 
- line = bytes.TrimSpace(line) - if len(line) == 0 || line[0] == '#' { - continue - } - - // wheel:*:0:root,anotherGrp - parts := strings.SplitN(string(line), ":", 4) - if _, err := strconv.Atoi(parts[2]); err != nil { - continue - } - - groups = append(groups, &group{name: parts[0], id: parts[2], members: strings.Split(parts[3], ",")}) - } - return groups + return chrootuser.GetAdditionalGroupIDs(rootDir, u) } diff --git a/pkg/util/syscall_credentials.go b/pkg/util/syscall_credentials.go index a316ea004d..95749cf785 100644 --- a/pkg/util/syscall_credentials.go +++ b/pkg/util/syscall_credentials.go @@ -25,13 +25,12 @@ import ( "github.com/sirupsen/logrus" ) -func SyscallCredentials(userStr string) (*syscall.Credential, error) { - uid, gid, err := getUIDAndGIDFromString(userStr, true) +func SyscallCredentials(rootDir string, userGroupStr string) (*syscall.Credential, error) { + uid, gid, err := getUIDAndGIDFromString(rootDir, userGroupStr, true) if err != nil { - return nil, errors.Wrap(err, "get uid/gid") + return nil, err } - - u, err := LookupUser(fmt.Sprint(uid)) + u, err := LookupUser(rootDir, fmt.Sprint(uid)) if err != nil { return nil, errors.Wrap(err, "lookup") } @@ -40,12 +39,12 @@ func SyscallCredentials(userStr string) (*syscall.Credential, error) { // initiliaze empty groups := []uint32{} - gidStr, err := groupIDs(u) + additionalGids, err := groupIDs(rootDir, u) if err != nil { return nil, errors.Wrap(err, "group ids for user") } - for _, g := range gidStr { + for _, g := range additionalGids { i, err := strconv.ParseUint(g, 10, 32) if err != nil { return nil, errors.Wrap(err, "parseuint") diff --git a/pkg/util/syscall_credentials_test.go b/pkg/util/syscall_credentials_test.go index e987dcd773..05f1e7c2d0 100644 --- a/pkg/util/syscall_credentials_test.go +++ b/pkg/util/syscall_credentials_test.go @@ -92,7 +92,7 @@ func TestSyscallCredentials(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := 
SyscallCredentials(tt.args.userStr) + got, err := SyscallCredentials("/", tt.args.userStr) testutil.CheckErrorAndDeepEqual(t, tt.wantErr, err, tt.want, got) }) } diff --git a/pkg/util/tar_util.go b/pkg/util/tar_util.go index 47436b9ae6..41c73fff0b 100644 --- a/pkg/util/tar_util.go +++ b/pkg/util/tar_util.go @@ -29,7 +29,6 @@ import ( "strings" "syscall" - "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/system" "github.com/pkg/errors" @@ -40,22 +39,24 @@ import ( type Tar struct { hardlinks map[uint64]string w *tar.Writer + rootDir string } // NewTar will create an instance of Tar that can write files to the writer at f. -func NewTar(f io.Writer) Tar { +func NewTar(rootDir string, f io.Writer) Tar { w := tar.NewWriter(f) return Tar{ w: w, hardlinks: map[uint64]string{}, + rootDir: rootDir, } } -func CreateTarballOfDirectory(pathToDir string, f io.Writer) error { +func CreateTarballOfDirectory(rootDir string, pathToDir string, f io.Writer) error { if !filepath.IsAbs(pathToDir) { return errors.New("pathToDir is not absolute") } - tarWriter := NewTar(f) + tarWriter := NewTar(rootDir, f) defer tarWriter.Close() walkFn := func(path string, d fs.DirEntry, err error) error { @@ -80,7 +81,7 @@ func (t *Tar) Close() { func (t *Tar) AddFileToTar(p string) error { i, err := os.Lstat(p) if err != nil { - return fmt.Errorf("Failed to get file info for %s: %s", p, err) + return fmt.Errorf("Failed to get file info for %s: %w", p, err) } linkDst := "" if i.Mode()&os.ModeSymlink != 0 { @@ -103,14 +104,17 @@ func (t *Tar) AddFileToTar(p string) error { return err } - if p == config.RootDir { + if p == "/" { // allow entry for / to preserve permission changes etc. (currently ignored anyway by Docker runtime) hdr.Name = "/" } else { + // trim off the rootDirectory of the file inside the tarball. 
+ // This is done because of chroot isolation possibility + trimmed := strings.TrimPrefix(p, t.rootDir) // Docker uses no leading / in the tarball - hdr.Name = strings.TrimPrefix(p, config.RootDir) - hdr.Name = strings.TrimLeft(hdr.Name, "/") + hdr.Name = strings.TrimLeft(trimmed, "/") } + logrus.Debugf("using name %v for file %v in tarball", hdr.Name, p) if hdr.Typeflag == tar.TypeDir && !strings.HasSuffix(hdr.Name, "/") { hdr.Name = hdr.Name + "/" } @@ -185,7 +189,7 @@ func (t *Tar) Whiteout(p string) error { th := &tar.Header{ // Docker uses no leading / in the tarball - Name: strings.TrimLeft(filepath.Join(dir, name), "/"), + Name: strings.TrimLeft(filepath.Join(dir, name), t.rootDir), Size: 0, } if err := t.w.WriteHeader(th); err != nil { diff --git a/pkg/util/tar_util_test.go b/pkg/util/tar_util_test.go index 565b2881de..9e0099e6c1 100644 --- a/pkg/util/tar_util_test.go +++ b/pkg/util/tar_util_test.go @@ -71,7 +71,7 @@ func Test_AddFileToTar(t *testing.T) { } buf := new(bytes.Buffer) - tarw := NewTar(buf) + tarw := NewTar(testDir, buf) if err := tarw.AddFileToTar(path); err != nil { t.Fatal(err) } @@ -120,7 +120,7 @@ func setUpFilesAndTars(testDir string) error { } func createTar(testdir string, writer io.Writer) error { - t := NewTar(writer) + t := NewTar(testdir, writer) defer t.Close() for _, regFile := range regularFiles { filePath := filepath.Join(testdir, regFile) @@ -136,7 +136,7 @@ func Test_CreateTarballOfDirectory(t *testing.T) { wantErr := false createFilesInTempDir(t, tmpDir) f := &bytes.Buffer{} - err := CreateTarballOfDirectory(tmpDir, f) + err := CreateTarballOfDirectory(tmpDir, tmpDir, f) testutil.CheckError(t, wantErr, err) extracedFilesDir := filepath.Join(tmpDir, "extracted") diff --git a/scripts/integration-test.sh b/scripts/integration-test.sh index f576518377..33f10ab1f4 100755 --- a/scripts/integration-test.sh +++ b/scripts/integration-test.sh @@ -55,4 +55,4 @@ FLAGS+=( "--repo=${IMAGE_REPO}" ) -go test ./integration/... 
"${FLAGS[@]}" "$@" +go test -v ./integration/... "${FLAGS[@]}" "$@"