Improve container sandbox status #479

Conversation
New sandbox info:

$ crictl inspectp 6ecc8a386012a
{
"status": {
"id": "6ecc8a386012aa3d8cc41e986263a56c039044b7f9dcceda012cc3fc045e58c9",
"metadata": {
"attempt": 0,
"name": "kube-dns-6c857864fb-z88p2",
"namespace": "kube-system",
"uid": "64489eae-d610-11e7-8e8b-42010af00002"
},
"state": "SANDBOX_NOTREADY",
"createdAt": "2017-11-30T20:52:44.926737345Z",
"network": {
"ip": "10.88.22.12"
},
"linux": {
"namespaces": {
"options": {
"hostIpc": false,
"hostNetwork": false,
"hostPid": false
}
}
},
"labels": {
"io.kubernetes.pod.name": "kube-dns-6c857864fb-z88p2",
"io.kubernetes.pod.namespace": "kube-system",
"io.kubernetes.pod.uid": "64489eae-d610-11e7-8e8b-42010af00002",
"k8s-app": "kube-dns",
"pod-template-hash": "2741342096"
},
"annotations": {
"kubernetes.io/config.seen": "2017-11-30T20:52:41.854730835Z",
"kubernetes.io/config.source": "api",
"scheduler.alpha.kubernetes.io/critical-pod": ""
}
},
"info": {
"pid": 0,
"processStatus": "deleted",
"netNamespace": "/var/run/netns/cni-ae2bd03e-4f7e-d9d0-911e-dafa7bb860e2",
"netNamespaceClosed": false,
"image": "gcr.io/google_containers/pause:3.0",
"snapshotKey": "6ecc8a386012aa3d8cc41e986263a56c039044b7f9dcceda012cc3fc045e58c9",
"snapshotter": "overlayfs",
"config": {
"metadata": {
"name": "kube-dns-6c857864fb-z88p2",
"uid": "64489eae-d610-11e7-8e8b-42010af00002",
"namespace": "kube-system"
},
"hostname": "kube-dns-6c857864fb-z88p2",
"log_directory": "/var/log/pods/64489eae-d610-11e7-8e8b-42010af00002",
"dns_config": {
"servers": [
"169.254.169.254"
],
"searches": [
"c.noogler-kubernetes.google.com.internal",
"google.internal"
]
},
"port_mappings": [
{
"protocol": 1,
"container_port": 10053
},
{
"container_port": 10053
},
{
"container_port": 10055
},
{
"protocol": 1,
"container_port": 53
},
{
"container_port": 53
},
{
"container_port": 10054
}
],
"labels": {
"io.kubernetes.pod.name": "kube-dns-6c857864fb-z88p2",
"io.kubernetes.pod.namespace": "kube-system",
"io.kubernetes.pod.uid": "64489eae-d610-11e7-8e8b-42010af00002",
"k8s-app": "kube-dns",
"pod-template-hash": "2741342096"
},
"annotations": {
"kubernetes.io/config.seen": "2017-11-30T20:52:41.854730835Z",
"kubernetes.io/config.source": "api",
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"linux": {
"cgroup_parent": "/kubepods/burstable/pod64489eae-d610-11e7-8e8b-42010af00002",
"security_context": {
"namespace_options": {}
}
}
},
"runtimeSpec": {
"ociVersion": "1.0.0",
"process": {
"user": {
"uid": 0,
"gid": 0
},
"args": [
"/pause"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"cwd": "/",
"capabilities": {
"bounding": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"effective": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"inheritable": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"permitted": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
]
},
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
],
"noNewPrivileges": true,
"oomScoreAdj": -998
},
"root": {
"path": "rootfs",
"readonly": true
},
"hostname": "kube-dns-6c857864fb-z88p2",
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": [
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination": "/dev/shm",
"type": "tmpfs",
"source": "shm",
"options": [
"nosuid",
"noexec",
"nodev",
"mode=1777",
"size=65536k"
]
},
{
"destination": "/dev/mqueue",
"type": "mqueue",
"source": "mqueue",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
}
],
"linux": {
"resources": {
"devices": [
{
"allow": false,
"access": "rwm"
}
],
"cpu": {
"shares": 2
}
},
"cgroupsPath": "/kubepods/burstable/pod64489eae-d610-11e7-8e8b-42010af00002/6ecc8a386012aa3d8cc41e986263a56c039044b7f9dcceda012cc3fc045e58c9",
"namespaces": [
{
"type": "pid"
},
{
"type": "ipc"
},
{
"type": "uts"
},
{
"type": "mount"
},
{
"type": "network",
"path": "/var/run/netns/cni-ae2bd03e-4f7e-d9d0-911e-dafa7bb860e2"
}
],
"maskedPaths": [
"/proc/kcore",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/sys/firmware",
"/proc/scsi"
],
"readonlyPaths": [
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
}
}
}
}
New container info:

$ crictl inspect 98ad5842a6284
{
"status": {
"id": "98ad5842a62844f62d4468c52dbfcc081a08ba25c95c842a967de7058f60dd24",
"metadata": {
"attempt": 0,
"name": "nginx"
},
"state": "CONTAINER_EXITED",
"createdAt": "2017-11-30T21:57:26.038813532Z",
"startedAt": "2017-11-30T21:57:26.235406355Z",
"finishedAt": "2017-12-01T20:08:56.523559866Z",
"exitCode": 255,
"image": {
"image": "gcr.io/google-containers/nginx-slim-amd64:0.20"
},
"imageRef": "gcr.io/google-containers/nginx-slim-amd64@sha256:6654db6d4028756062edac466454ee5c9cf9b20ef79e35a81e3c840031eb1e2b",
"reason": "Unknown",
"message": "",
"labels": {
"io.kubernetes.container.name": "nginx",
"io.kubernetes.pod.name": "nginx",
"io.kubernetes.pod.namespace": "e2e-tests-kubectl-2jdtb",
"io.kubernetes.pod.uid": "6ffc887b-d619-11e7-8e8b-42010af00002"
},
"annotations": {
"io.kubernetes.container.hash": "cfada9cb",
"io.kubernetes.container.ports": "[{\"containerPort\":80,\"protocol\":\"TCP\"}]",
"io.kubernetes.container.restartCount": "0",
"io.kubernetes.container.terminationMessagePath": "/dev/termination-log",
"io.kubernetes.container.terminationMessagePolicy": "File",
"io.kubernetes.pod.terminationGracePeriod": "30"
},
"mounts": [
{
"containerPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"hostPath": "/var/lib/kubelet/pods/6ffc887b-d619-11e7-8e8b-42010af00002/volumes/kubernetes.io~secret/default-token-4ztmf",
"propagation": "PROPAGATION_PRIVATE",
"readonly": true,
"selinuxRelabel": false
},
{
"containerPath": "/etc/hosts",
"hostPath": "/var/lib/kubelet/pods/6ffc887b-d619-11e7-8e8b-42010af00002/etc-hosts",
"propagation": "PROPAGATION_PRIVATE",
"readonly": false,
"selinuxRelabel": false
},
{
"containerPath": "/dev/termination-log",
"hostPath": "/var/lib/kubelet/pods/6ffc887b-d619-11e7-8e8b-42010af00002/containers/nginx/5f93e895",
"propagation": "PROPAGATION_PRIVATE",
"readonly": false,
"selinuxRelabel": false
}
],
"logPath": "/var/log/pods/6ffc887b-d619-11e7-8e8b-42010af00002/nginx_0.log"
},
"info": {
"sandboxID": "4ed90697eba7b9e9661599b450c5ed5e28219471e1d52b2d3f1d18495a18bd70",
"path": "nginx",
"args": [
"-g",
"daemon off;"
],
"pid": 3664,
"removing": false,
"snapshotKey": "98ad5842a62844f62d4468c52dbfcc081a08ba25c95c842a967de7058f60dd24",
"snapshotter": "overlayfs",
"config": {
"metadata": {
"name": "nginx"
},
"image": {
"image": "gcr.io/google-containers/nginx-slim-amd64@sha256:6654db6d4028756062edac466454ee5c9cf9b20ef79e35a81e3c840031eb1e2b"
},
"envs": [
{
"key": "KUBERNETES_PORT",
"value": "tcp://10.0.0.1:443"
},
{
"key": "KUBERNETES_PORT_443_TCP",
"value": "tcp://10.0.0.1:443"
},
{
"key": "KUBERNETES_PORT_443_TCP_PROTO",
"value": "tcp"
},
{
"key": "KUBERNETES_PORT_443_TCP_PORT",
"value": "443"
},
{
"key": "KUBERNETES_PORT_443_TCP_ADDR",
"value": "10.0.0.1"
},
{
"key": "KUBERNETES_SERVICE_HOST",
"value": "10.0.0.1"
},
{
"key": "KUBERNETES_SERVICE_PORT",
"value": "443"
},
{
"key": "KUBERNETES_SERVICE_PORT_HTTPS",
"value": "443"
}
],
"mounts": [
{
"container_path": "/var/run/secrets/kubernetes.io/serviceaccount",
"host_path": "/var/lib/kubelet/pods/6ffc887b-d619-11e7-8e8b-42010af00002/volumes/kubernetes.io~secret/default-token-4ztmf",
"readonly": true
},
{
"container_path": "/etc/hosts",
"host_path": "/var/lib/kubelet/pods/6ffc887b-d619-11e7-8e8b-42010af00002/etc-hosts"
},
{
"container_path": "/dev/termination-log",
"host_path": "/var/lib/kubelet/pods/6ffc887b-d619-11e7-8e8b-42010af00002/containers/nginx/5f93e895"
}
],
"labels": {
"io.kubernetes.container.name": "nginx",
"io.kubernetes.pod.name": "nginx",
"io.kubernetes.pod.namespace": "e2e-tests-kubectl-2jdtb",
"io.kubernetes.pod.uid": "6ffc887b-d619-11e7-8e8b-42010af00002"
},
"annotations": {
"io.kubernetes.container.hash": "cfada9cb",
"io.kubernetes.container.ports": "[{\"containerPort\":80,\"protocol\":\"TCP\"}]",
"io.kubernetes.container.restartCount": "0",
"io.kubernetes.container.terminationMessagePath": "/dev/termination-log",
"io.kubernetes.container.terminationMessagePolicy": "File",
"io.kubernetes.pod.terminationGracePeriod": "30"
},
"log_path": "nginx_0.log",
"linux": {
"resources": {
"cpu_shares": 2,
"oom_score_adj": 1000
},
"security_context": {
"namespace_options": {},
"run_as_user": {}
}
}
},
"runtimeSpec": {
"ociVersion": "1.0.0",
"process": {
"user": {
"uid": 0,
"gid": 0
},
"args": [
"nginx",
"-g",
"daemon off;"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"KUBERNETES_PORT=tcp://10.0.0.1:443",
"KUBERNETES_PORT_443_TCP=tcp://10.0.0.1:443",
"KUBERNETES_PORT_443_TCP_PROTO=tcp",
"KUBERNETES_PORT_443_TCP_PORT=443",
"KUBERNETES_PORT_443_TCP_ADDR=10.0.0.1",
"KUBERNETES_SERVICE_HOST=10.0.0.1",
"KUBERNETES_SERVICE_PORT=443",
"KUBERNETES_SERVICE_PORT_HTTPS=443"
],
"cwd": "/",
"capabilities": {
"bounding": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"effective": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"inheritable": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"permitted": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
]
},
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
],
"apparmorProfile": "cri-containerd.apparmor.d",
"oomScoreAdj": 1000
},
"root": {
"path": "rootfs"
},
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": [
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination": "/dev/shm",
"type": "tmpfs",
"source": "shm",
"options": [
"nosuid",
"noexec",
"nodev",
"mode=1777",
"size=65536k"
]
},
{
"destination": "/dev/mqueue",
"type": "mqueue",
"source": "mqueue",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
},
{
"destination": "/sys/fs/cgroup",
"type": "cgroup",
"source": "cgroup",
"options": [
"nosuid",
"noexec",
"nodev",
"relatime",
"ro"
]
},
{
"destination": "/etc/resolv.conf",
"type": "bind",
"source": "/var/lib/cri-containerd/sandboxes/4ed90697eba7b9e9661599b450c5ed5e28219471e1d52b2d3f1d18495a18bd70/resolv.conf",
"options": [
"rbind",
"rprivate",
"rw"
]
},
{
"destination": "/dev/shm",
"type": "bind",
"source": "/var/lib/cri-containerd/sandboxes/4ed90697eba7b9e9661599b450c5ed5e28219471e1d52b2d3f1d18495a18bd70/shm",
"options": [
"rbind",
"rprivate",
"rw"
]
},
{
"destination": "/var/run/secrets/kubernetes.io/serviceaccount",
"type": "bind",
"source": "/var/lib/kubelet/pods/6ffc887b-d619-11e7-8e8b-42010af00002/volumes/kubernetes.io~secret/default-token-4ztmf",
"options": [
"rbind",
"rprivate",
"ro"
]
},
{
"destination": "/etc/hosts",
"type": "bind",
"source": "/var/lib/kubelet/pods/6ffc887b-d619-11e7-8e8b-42010af00002/etc-hosts",
"options": [
"rbind",
"rprivate",
"rw"
]
},
{
"destination": "/dev/termination-log",
"type": "bind",
"source": "/var/lib/kubelet/pods/6ffc887b-d619-11e7-8e8b-42010af00002/containers/nginx/5f93e895",
"options": [
"rbind",
"rprivate",
"rw"
]
}
],
"linux": {
"resources": {
"devices": [
{
"allow": false,
"access": "rwm"
}
],
"memory": {
"limit": 0
},
"cpu": {
"shares": 2,
"quota": 0,
"period": 0
}
},
"cgroupsPath": "/kubepods/besteffort/pod6ffc887b-d619-11e7-8e8b-42010af00002/98ad5842a62844f62d4468c52dbfcc081a08ba25c95c842a967de7058f60dd24",
"namespaces": [
{
"type": "pid"
},
{
"type": "ipc",
"path": "/proc/3346/ns/ipc"
},
{
"type": "uts",
"path": "/proc/3346/ns/uts"
},
{
"type": "mount"
},
{
"type": "network",
"path": "/proc/3346/ns/net"
}
],
"maskedPaths": [
"/proc/kcore",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/sys/firmware",
"/proc/scsi"
],
"readonlyPaths": [
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
}
}
}
}
@@ -56,6 +58,8 @@ func (c *criContainerdService) PodSandboxStatus(ctx context.Context, r *runtime.
if taskStatus.Status == containerd.Running {
    state = runtime.PodSandboxState_SANDBOX_READY
Is it better to put the code below into toCRISandboxStatus? We get the original info outside, and process it inside:
if taskStatus.Status == containerd.Running {
state = runtime.PodSandboxState_SANDBOX_READY
Will send another PR to do that. Thanks!
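For illustration, a minimal sketch of what that follow-up might look like — the helper name `toCRISandboxState` and its placement are assumptions here, not the actual follow-up PR:

```go
// Hypothetical helper: derive the CRI sandbox state from the raw
// containerd task status inside the conversion path, so callers pass
// the original info in and get the processed state out.
func toCRISandboxState(taskStatus containerd.ProcessStatus) runtime.PodSandboxState {
	if taskStatus == containerd.Running {
		return runtime.PodSandboxState_SANDBOX_READY
	}
	// Created, stopped, or deleted tasks all map to NOTREADY.
	return runtime.PodSandboxState_SANDBOX_NOTREADY
}
```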
pkg/server/sandbox_status.go (Outdated)
var snapshotkey, snapshotter string
ctrInfo, err := container.Info(ctx)
container := sandbox.Container

blank line
Done.
hard to identify what was added vs. what was changed for change's sake...
@@ -200,3 +144,71 @@ func toCRISandboxStatus(meta sandboxstore.Metadata, state runtime.PodSandboxStat
    Annotations: meta.Config.GetAnnotations(),
  }
}
why move?
pkg/server/container_status.go (Outdated)
SandboxID string `json:"sandboxID"`
Path      string `json:"path"`
These are useful but already in the runtime spec, in the expected location.
I'm a little bit unsure about this. The runtime spec is too big, but it is a bit of duplication to add these here.
Removed.
@@ -69,7 +73,7 @@ func (c *criContainerdService) PodSandboxStatus(ctx context.Context, r *runtime.
  }
  createdAt := ctrInfo.CreatedAt
  status := toCRISandboxStatus(sandbox.Metadata, state, createdAt, ip)
  info, err := toCRISandboxContainerInfo(ctx, sandbox.Container, r.GetVerbose())
  info, err := toCRISandboxInfo(ctx, sandbox, pid, processStatus, r.GetVerbose())
preferred it as it was..
It's not only "container info", right? e.g. PodSandboxConfig
I meant the parameters, not the name :-)
pkg/server/sandbox_status.go (Outdated)
} else {
  glog.Errorf("Failed to get target shapshot info: %v", err)
preferred it this way
Now we also get the sandbox container image; it's not only snapshot info.
The calls to container.Spec and container.Info and the error checking are in both versions, just moved around in this one. The prior way used variables for temp storage in the function, then did a "to" sandboxInfo conversion at the end. The new one creates the sandboxInfo early and has various assigns as it goes. A lot of style changes mixed in with the actual changes makes it hard to see what's new vs. what was just changed for style's sake.
pkg/server/sandbox_status.go (Outdated)
  glog.Errorf("Failed to get target shapshot info: %v", err)
}

si := &sandboxInfo{
same
pkg/server/sandbox_status.go (Outdated)
if !verbose {
  return nil, nil
}

task, err := container.Task(ctx, nil)
what was wrong here?
I mentioned it in the previous comment. It's troublesome to deal with the task error (see the logic above). Since we've got the task status in `PodSandboxStatus`, we may want to reuse it.
In the original code here, the task-not-found case isn't handled properly. Note that it's very common that the task is deleted but the sandbox still exists, just not ready.
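Roughly, the handling being described — a sketch assuming the containerd client's `errdefs.IsNotFound` is how the deleted-task case gets detected (the helper name is illustrative, not the merged code):

```go
// Sketch: a missing task is a normal situation for a not-ready
// sandbox, so it should not fail the verbose status call.
func getSandboxTaskStatus(ctx context.Context, container containerd.Container) (containerd.ProcessStatus, uint32, error) {
	task, err := container.Task(ctx, nil)
	if err != nil {
		if errdefs.IsNotFound(err) {
			// The pause task was deleted but the sandbox metadata
			// still exists; report that instead of erroring out.
			return containerd.Unknown, 0, nil
		}
		return containerd.Unknown, 0, fmt.Errorf("failed to get sandbox container task: %v", err)
	}
	status, err := task.Status(ctx)
	if err != nil {
		return containerd.Unknown, 0, fmt.Errorf("failed to get sandbox container task status: %v", err)
	}
	return status.Status, task.Pid(), nil
}
```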
The sandbox container task (pause container) was deleted, but the sandbox still exists, just not ready... but we don't need to know that in debug, so we use the pod sandbox status? This is for "extra" verbose debug information; I thought it important to show the "raw" task status here in verbose mode vs. the cached PodSandboxStatus.
@mikebrow We want to know whether a sandbox container task state is `stopped` or `deleted`, right?
Actually I don't quite get what you are talking about, sorry. :P
if err != nil {
  return nil, fmt.Errorf("failed to get sandbox container task: %v", err)
}
status, err := task.Status(ctx)
why change?
pkg/server/sandbox_status.go (Outdated)
} else {
  glog.Errorf("failed to marshal info %v: %v", si, err)
  info["info"] = err.Error()
glog.Errorf("Failed to get sandbox contaienr %q info: %v", sandbox.ID, err)
container
Done
pkg/server/sandbox_status.go (Outdated)
type sandboxInfo struct {
  Pid    uint32 `json:"pid"`
  Status string `json:"processStatus"`
  NetNS  string `json:"netNamespace"`
already in the spec in the right spot
This is critical information for a sandbox, and it's quite hard to find in the runtime spec. We want to highlight it.
We may need to remove the runtime spec or raise its verbosity level in the future; we don't want this to be affected by that.
I'll keep `NetNS` here, because we have `NetNSClosed` just below it, which makes sense to me.
`| grep` :-)
But yeah, one could make the argument to pull up every piece of information in the spec because we might want to highlight it. Sort of depends on what you are doing at any point in time. Debugging a security issue? Debugging a network issue? A mount issue?
Will also remove then.
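Putting this thread together with the `crictl inspectp` output at the top, the verbose info ends up shaped roughly like the struct below — a sketch whose field set is inferred from the dump's "info" keys, not necessarily the exact merged code; `runtimespec` is assumed to alias the OCI runtime-spec package:

```go
// Sketch of the verbose sandbox info; the JSON tags mirror the keys
// under "info" in the inspectp output above.
type sandboxInfo struct {
	Pid         uint32                    `json:"pid"`
	Status      string                    `json:"processStatus"`
	NetNS       string                    `json:"netNamespace"`
	NetNSClosed bool                      `json:"netNamespaceClosed"`
	Image       string                    `json:"image"`
	SnapshotKey string                    `json:"snapshotKey"`
	Snapshotter string                    `json:"snapshotter"`
	Config      *runtime.PodSandboxConfig `json:"config"`
	RuntimeSpec *runtimespec.Spec         `json:"runtimeSpec"`
}
```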
pkg/server/sandbox_status.go (Outdated)
}

info := make(map[string]string)
pid := task.Pid()
correct pid.
For a running sandbox, it is non-zero:
"info": {
"pid": 26174,
"processStatus": "running",
"netNamespace": "/var/run/netns/cni-343162ed-9064-64c8-2ed8-b6a6f8213e0e",
"netNamespaceClosed": false,
"image": "gcr.io/google_containers/pause:3.0",
"snapshotKey": "d2ec0574ba0f8cf0ed9bc14e5b7cc7dc365567bb2456cc315e0d3547a4bea5e5",
"snapshotter": "overlayfs",
"config": {
"metadata": {
"name": "nginx-sandbox",
"uid": "hdishd83djaidwnduwk28bcsb",
"namespace": "default",
"attempt": 1
},
looks good.. I saw 0 before, not sure what changed, but good!
pkg/server/sandbox_status.go (Outdated)
if err != nil {
  return nil, fmt.Errorf("failed to get sandbox container task status: %v", err)
si := &sandboxInfo{
  Pid: pid,
See the output: pid zero is wrong.
That's what you see in `docker inspect` for an exited container.
could've sworn I checked and it was running?
pkg/server/sandbox_status.go (Outdated)
if err != nil {
  glog.Errorf("Failed to get sandbox container runtime spec: %v", err)
if sandbox.NetNSPath != "" {
  // Add network closed information if sandbox is not using host network.
thumbs up!
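A sketch of that check — `Closed()` is an assumed accessor on the sandbox's netns handle here, not necessarily the real cri-containerd API:

```go
// Sketch: NetNSPath is empty for host-network pods, so netns details
// are only reported for sandboxes with their own network namespace.
if sandbox.NetNSPath != "" {
	si.NetNS = sandbox.NetNSPath
	// The namespace can already be gone while its path is still
	// recorded; surface that as netNamespaceClosed (assumed accessor).
	si.NetNSClosed = sandbox.NetNS == nil || sandbox.NetNS.Closed()
}
```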
pkg/server/sandbox_status.go (Outdated)
// Do not use config.SandboxImage because the configuration might
// be changed during restart. It may not reflect the actual image
// used by the sandbox container.
si.Image = ctrInfo.Image
all good to update if these are not nil?
What do you mean?
Should we do an `if ctrInfo.Image != ""` check?
Seems not. If it is empty, then just assign an empty value. No difference.
I might've been confused by the comment. Not sure why we would update with nil here, if that is a reasonable thing to report/do. Just felt off :-)
I just mean the image in container info is the source of truth. We should use it whatever it is. :)
Force-pushed from 1d61e0d to c98eeb1, then from c98eeb1 to dd017e6.
Signed-off-by: Lantao Liu <[email protected]>
@mikebrow Done.
Couple small comments.
/LGTM
This PR:
- Added `Path` and `Args` in container info; `docker inspect` also has this information at top.
- Added `NetNS`, `NetNSClosed`, `Image` and `SandboxConfig` in sandbox info.
- Reused the task status already retrieved in `PodSandboxStatus` instead of getting the `containerd.Task` again.

@mikebrow