diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b872db8d77..1e9794b716 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,8 +19,8 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v2 with: - version: v1.38.0 - args: --timeout=5m + version: v1.42.0 # Has fixes for stylecheck configuration https://github.com/golangci/golangci-lint/pull/2017/files + args: --timeout=5m -v test: runs-on: 'windows-2019' diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000000..16b25be554 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,96 @@ +linters: + enable: + - stylecheck + +linters-settings: + stylecheck: + # https://staticcheck.io/docs/checks + checks: ["all"] + + +issues: + # This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like + # (screaming case Windows api constants for example). There's also some structs that we *could* change the initialisms to be Go + # friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't + # supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms, + # while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change. + exclude-rules: + - path: layer.go + linters: + - stylecheck + Text: "ST1003:" + + - path: hcsshim.go + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema2\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\wclayer\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: hcn\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema1\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hns\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\compactext4\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\format\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guestrequest\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guest\\prot\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\windevice\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\winapi\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\vmcompute\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\regstate\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcserror\\ + linters: + - stylecheck + Text: "ST1003:" \ No newline at end of file diff --git a/cmd/containerd-shim-runhcs-v1/task_hcs.go b/cmd/containerd-shim-runhcs-v1/task_hcs.go index dee9758a46..484b21cf1a 100644 --- a/cmd/containerd-shim-runhcs-v1/task_hcs.go +++ b/cmd/containerd-shim-runhcs-v1/task_hcs.go @@ -18,7 +18,6 @@ import ( "github.com/sirupsen/logrus" "go.opencensus.io/trace" - "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options" runhcsopts "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options" "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats" "github.com/Microsoft/hcsshim/internal/cmd" @@ -587,7 +586,7 @@ func (ht *hcsTask) DeleteExec(ctx context.Context, eid string) (int, uint32, tim return int(status.Pid), status.ExitStatus, status.ExitedAt, nil } -func (ht *hcsTask) Pids(ctx context.Context) ([]options.ProcessDetails, error) { +func (ht *hcsTask) Pids(ctx context.Context) ([]runhcsopts.ProcessDetails, error) 
{ // Map all user created exec's to pid/exec-id pidMap := make(map[int]string) ht.execs.Range(func(key, value interface{}) bool { @@ -606,7 +605,7 @@ func (ht *hcsTask) Pids(ctx context.Context) ([]options.ProcessDetails, error) { } // Copy to pid/exec-id pair's - pairs := make([]options.ProcessDetails, len(props.ProcessList)) + pairs := make([]runhcsopts.ProcessDetails, len(props.ProcessList)) for i, p := range props.ProcessList { pairs[i].ImageName = p.ImageName pairs[i].CreatedAt = p.CreateTimestamp diff --git a/cmd/gcstools/commoncli/common.go b/cmd/gcstools/commoncli/common.go index 9359c1eeba..0e8f00f432 100644 --- a/cmd/gcstools/commoncli/common.go +++ b/cmd/gcstools/commoncli/common.go @@ -20,7 +20,7 @@ func SetFlagsForLogging() []*string { // SetupLogging creates the logger from the command line parameters. func SetupLogging(args ...*string) error { if len(args) < 1 { - return fmt.Errorf("Invalid log params") + return fmt.Errorf("invalid log params") } level, err := logrus.ParseLevel(*args[1]) if err != nil { diff --git a/cmd/runhcs/delete.go b/cmd/runhcs/delete.go index cebea043c8..3edf3b8d41 100644 --- a/cmd/runhcs/delete.go +++ b/cmd/runhcs/delete.go @@ -57,7 +57,7 @@ status of "ubuntu01" as "stopped" the following will delete resources held for kill = true default: if !force { - return fmt.Errorf("cannot delete container %s that is not stopped: %s\n", id, s) + return fmt.Errorf("cannot delete container %s that is not stopped: %s", id, s) } kill = true } diff --git a/cmd/runhcs/exec.go b/cmd/runhcs/exec.go index befc79f029..3b4941f77c 100644 --- a/cmd/runhcs/exec.go +++ b/cmd/runhcs/exec.go @@ -146,12 +146,12 @@ func getProcessSpec(context *cli.Context, c *container) (*specs.Process, error) func validateProcessSpec(spec *specs.Process) error { if spec.Cwd == "" { - return fmt.Errorf("Cwd property must not be empty") + return fmt.Errorf("cwd property must not be empty") } // IsAbs doesnt recognize Unix paths on Windows builds so handle that case // here. if !filepath.IsAbs(spec.Cwd) && !strings.HasPrefix(spec.Cwd, "/") { - return fmt.Errorf("Cwd must be an absolute path") + return fmt.Errorf("cwd must be an absolute path") } if len(spec.Args) == 0 { return fmt.Errorf("args must not be empty") diff --git a/hcn/hcn.go b/hcn/hcn.go index b6678fb65f..df3a59a78c 100644 --- a/hcn/hcn.go +++ b/hcn/hcn.go @@ -122,7 +122,7 @@ func defaultQuery() HostComputeQuery { // PlatformDoesNotSupportError happens when users are attempting to use a newer shim on an older OS func platformDoesNotSupportError(featureName string) error { - return fmt.Errorf("Platform does not support feature %s", featureName) + return fmt.Errorf("platform does not support feature %s", featureName) } // V2ApiSupported returns an error if the HCN version does not support the V2 Apis. 
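// Aside: the error-string edits throughout this diff follow stylecheck's ST1005 rule: error
// strings start lowercase and carry no trailing punctuation or newline, because callers
// usually wrap them into larger messages. A minimal sketch of the convention; the function
// and messages below are illustrative, not part of this change:

package example

import (
	"errors"
	"fmt"
)

func openLayer(path string) error {
	if path == "" {
		// lowercase, no trailing period or "\n", so it composes cleanly when wrapped
		return errors.New("missing layer path")
	}
	if err := checkAccess(path); err != nil {
		// wrapped, the message reads as one sentence: "open layer c:\x: access denied"
		return fmt.Errorf("open layer %s: %w", path, err)
	}
	return nil
}

func checkAccess(path string) error { return errors.New("access denied") }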
diff --git a/hcn/hcnroute.go b/hcn/hcnroute.go index d6d27079bc..52e2498462 100644 --- a/hcn/hcnroute.go +++ b/hcn/hcnroute.go @@ -137,7 +137,7 @@ func AddRoute(endpoints []HostComputeEndpoint, destinationPrefix string, nextHop logrus.Debugf("hcn::HostComputeRoute::AddRoute endpointId=%v, destinationPrefix=%v, nextHop=%v, needEncapsulation=%v", endpoints, destinationPrefix, nextHop, needEncapsulation) if len(endpoints) <= 0 { - return nil, errors.New("Missing endpoints") + return nil, errors.New("missing endpoints") } route := &HostComputeRoute{ diff --git a/internal/hcsoci/create.go b/internal/hcsoci/create.go index d1b0a729c1..cd9ca03f7e 100644 --- a/internal/hcsoci/create.go +++ b/internal/hcsoci/create.go @@ -135,7 +135,7 @@ func validateContainerConfig(ctx context.Context, coi *createOptionsInternal) er } if coi.HostingSystem != nil && coi.templateID != "" && !coi.HostingSystem.IsClone { - return fmt.Errorf("A container can not be cloned inside a non cloned POD") + return fmt.Errorf("a container can not be cloned inside a non cloned POD") } if coi.templateID != "" { @@ -152,11 +152,11 @@ func validateContainerConfig(ctx context.Context, coi *createOptionsInternal) er if coi.HostingSystem != nil && coi.HostingSystem.IsTemplate { if len(coi.Spec.Windows.Devices) != 0 { - return fmt.Errorf("Mapped Devices are not supported for template containers") + return fmt.Errorf("mapped Devices are not supported for template containers") } if _, ok := coi.Spec.Windows.CredentialSpec.(string); ok { - return fmt.Errorf("gMSA specifications are not supported for template containers") + return fmt.Errorf("gmsa specifications are not supported for template containers") } if coi.Spec.Windows.Servicing { @@ -179,7 +179,7 @@ func initializeCreateOptions(ctx context.Context, createOptions *CreateOptions) } if coi.Spec == nil { - return nil, fmt.Errorf("Spec must be supplied") + return nil, fmt.Errorf("spec must be supplied") } // Defaults if omitted by caller. diff --git a/internal/hcsoci/hcsdoc_wcow.go b/internal/hcsoci/hcsdoc_wcow.go index af9e6deb38..f27b5da27e 100644 --- a/internal/hcsoci/hcsdoc_wcow.go +++ b/internal/hcsoci/hcsdoc_wcow.go @@ -269,7 +269,7 @@ func createWindowsContainerDocument(ctx context.Context, coi *createOptionsInter // Use the reserved network namespace for containers created inside // cloned or template UVMs. 
if coi.HostingSystem != nil && (coi.HostingSystem.IsTemplate || coi.HostingSystem.IsClone) { - v2Container.Networking.Namespace = uvm.DEFAULT_CLONE_NETWORK_NAMESPACE_ID + v2Container.Networking.Namespace = uvm.DefaultCloneNetworkNamespaceID } else { v2Container.Networking.Namespace = coi.actualNetworkNamespace } diff --git a/internal/jobcontainers/storage.go b/internal/jobcontainers/storage.go index 1ff1ac4744..c9fe72228a 100644 --- a/internal/jobcontainers/storage.go +++ b/internal/jobcontainers/storage.go @@ -15,7 +15,7 @@ import ( // Trailing backslash required for SetVolumeMountPoint and DeleteVolumeMountPoint const sandboxMountFormat = `C:\C\%s\` -func mountLayers(ctx context.Context, containerId string, s *specs.Spec, volumeMountPath string) error { +func mountLayers(ctx context.Context, containerID string, s *specs.Spec, volumeMountPath string) error { if s == nil || s.Windows == nil || s.Windows.LayerFolders == nil { return errors.New("field 'Spec.Windows.Layerfolders' is not populated") } @@ -41,7 +41,7 @@ func mountLayers(ctx context.Context, containerId string, s *specs.Spec, volumeM if s.Root.Path == "" { log.G(ctx).Debug("mounting job container storage") - containerRootPath, err := layers.MountContainerLayers(ctx, containerId, s.Windows.LayerFolders, "", volumeMountPath, nil) + containerRootPath, err := layers.MountContainerLayers(ctx, containerID, s.Windows.LayerFolders, "", volumeMountPath, nil) if err != nil { return errors.Wrap(err, "failed to mount container storage") } diff --git a/internal/layers/layers.go b/internal/layers/layers.go index eaef3c3725..146f9accf2 100644 --- a/internal/layers/layers.go +++ b/internal/layers/layers.go @@ -15,7 +15,6 @@ import ( "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/ospath" "github.com/Microsoft/hcsshim/internal/uvm" - uvmpkg "github.com/Microsoft/hcsshim/internal/uvm" "github.com/Microsoft/hcsshim/internal/wclayer" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -78,10 +77,10 @@ func (layers *ImageLayers) Release(ctx context.Context, all bool) error { // // TODO dcantah: Keep better track of the layers that are added, don't simply discard the SCSI, VSMB, etc. resource types gotten inside. 
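// Aside: the layers.go hunk that follows removes a duplicate import of the uvm package
// (previously imported both as uvm and under the alias uvmpkg) and renames the parameter
// from uvm to vm so it no longer shadows the package identifier. The shadowing problem in
// miniature; this sketch assumes the package's exported names as they appear in this diff:

package layers

import "github.com/Microsoft/hcsshim/internal/uvm"

// With a parameter named uvm, the package identifier is shadowed inside the function body
// and uvm.VMAccessTypeNoop would resolve against the variable, which is why the old code
// needed the second alias import. Naming the parameter vm keeps the package reachable.
func mount(vm *uvm.UtilityVM) {
	_ = uvm.VMAccessTypeNoop
}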
-func MountContainerLayers(ctx context.Context, containerId string, layerFolders []string, guestRoot string, volumeMountPath string, uvm *uvmpkg.UtilityVM) (_ string, err error) { +func MountContainerLayers(ctx context.Context, containerID string, layerFolders []string, guestRoot string, volumeMountPath string, vm *uvm.UtilityVM) (_ string, err error) { log.G(ctx).WithField("layerFolders", layerFolders).Debug("hcsshim::mountContainerLayers") - if uvm == nil { + if vm == nil { if len(layerFolders) < 2 { return "", errors.New("need at least two layers - base and scratch") } @@ -159,7 +158,7 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders } // V2 UVM - log.G(ctx).WithField("os", uvm.OS()).Debug("hcsshim::mountContainerLayers V2 UVM") + log.G(ctx).WithField("os", vm.OS()).Debug("hcsshim::mountContainerLayers V2 UVM") var ( layersAdded []string @@ -167,15 +166,15 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders ) defer func() { if err != nil { - if uvm.OS() == "windows" { + if vm.OS() == "windows" { for _, l := range layersAdded { - if err := uvm.RemoveVSMB(ctx, l, true); err != nil { + if err := vm.RemoveVSMB(ctx, l, true); err != nil { log.G(ctx).WithError(err).Warn("failed to remove wcow layer on cleanup") } } } else { for _, l := range layersAdded { - if err := removeLCOWLayer(ctx, uvm, l); err != nil { + if err := removeLCOWLayer(ctx, vm, l); err != nil { log.G(ctx).WithError(err).Warn("failed to remove lcow layer on cleanup") } } @@ -185,13 +184,13 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders for _, layerPath := range layerFolders[:len(layerFolders)-1] { log.G(ctx).WithField("layerPath", layerPath).Debug("mounting layer") - if uvm.OS() == "windows" { - options := uvm.DefaultVSMBOptions(true) + if vm.OS() == "windows" { + options := vm.DefaultVSMBOptions(true) options.TakeBackupPrivilege = true - if uvm.IsTemplate { - uvm.SetSaveableVSMBOptions(options, options.ReadOnly) + if vm.IsTemplate { + vm.SetSaveableVSMBOptions(options, options.ReadOnly) } - if _, err := uvm.AddVSMB(ctx, layerPath, options); err != nil { + if _, err := vm.AddVSMB(ctx, layerPath, options); err != nil { return "", fmt.Errorf("failed to add VSMB layer: %s", err) } layersAdded = append(layersAdded, layerPath) @@ -200,7 +199,7 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders layerPath = filepath.Join(layerPath, "layer.vhd") uvmPath string ) - uvmPath, err = addLCOWLayer(ctx, uvm, layerPath) + uvmPath, err = addLCOWLayer(ctx, vm, layerPath) if err != nil { return "", fmt.Errorf("failed to add LCOW layer: %s", err) } @@ -209,7 +208,7 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders } } - containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot) + containerScratchPathInUVM := ospath.Join(vm.OS(), guestRoot) hostPath, err := getScratchVHDPath(layerFolders) if err != nil { return "", fmt.Errorf("failed to get scratch VHD path in layer folders: %s", err) @@ -217,7 +216,7 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders log.G(ctx).WithField("hostPath", hostPath).Debug("mounting scratch VHD") var options []string - scsiMount, err := uvm.AddSCSI(ctx, hostPath, containerScratchPathInUVM, false, options, uvmpkg.VMAccessTypeIndividual) + scsiMount, err := vm.AddSCSI(ctx, hostPath, containerScratchPathInUVM, false, options, uvm.VMAccessTypeIndividual) if err != nil { return "", fmt.Errorf("failed to add SCSI scratch VHD: 
%s", err) } @@ -225,7 +224,7 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders // This handles the case where we want to share a scratch disk for multiple containers instead // of mounting a new one. Pass a unique value for `ScratchPath` to avoid container upper and // work directories colliding in the UVM. - if scsiMount.RefCount() > 1 && uvm.OS() == "linux" { + if scsiMount.RefCount() > 1 && vm.OS() == "linux" { scratchFmt := fmt.Sprintf("container_%s", filepath.Base(containerScratchPathInUVM)) containerScratchPathInUVM = ospath.Join("linux", scsiMount.UVMPath, scratchFmt) } else { @@ -234,26 +233,26 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders defer func() { if err != nil { - if err := uvm.RemoveSCSI(ctx, hostPath); err != nil { + if err := vm.RemoveSCSI(ctx, hostPath); err != nil { log.G(ctx).WithError(err).Warn("failed to remove scratch on cleanup") } } }() var rootfs string - if uvm.OS() == "windows" { + if vm.OS() == "windows" { // Load the filter at the C:\s location calculated above. We pass into this request each of the // read-only layer folders. var layers []hcsschema.Layer - layers, err = GetHCSLayers(ctx, uvm, layersAdded) + layers, err = GetHCSLayers(ctx, vm, layersAdded) if err != nil { return "", err } - err = uvm.CombineLayersWCOW(ctx, layers, containerScratchPathInUVM) + err = vm.CombineLayersWCOW(ctx, layers, containerScratchPathInUVM) rootfs = containerScratchPathInUVM } else { - rootfs = ospath.Join(uvm.OS(), guestRoot, uvmpkg.RootfsPath) - err = uvm.CombineLayersLCOW(ctx, containerId, lcowUvmLayerPaths, containerScratchPathInUVM, rootfs) + rootfs = ospath.Join(vm.OS(), guestRoot, uvm.RootfsPath) + err = vm.CombineLayersLCOW(ctx, containerID, lcowUvmLayerPaths, containerScratchPathInUVM, rootfs) } if err != nil { return "", err @@ -262,26 +261,26 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders return rootfs, nil } -func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) (uvmPath string, err error) { +func addLCOWLayer(ctx context.Context, vm *uvm.UtilityVM, layerPath string) (uvmPath string, err error) { // don't try to add as vpmem when we want additional devices on the uvm to be fully physically backed - if !uvm.DevicesPhysicallyBacked() { + if !vm.DevicesPhysicallyBacked() { // We first try vPMEM and if it is full or the file is too large we // fall back to SCSI. 
- uvmPath, err = uvm.AddVPMem(ctx, layerPath) + uvmPath, err = vm.AddVPMem(ctx, layerPath) if err == nil { log.G(ctx).WithFields(logrus.Fields{ "layerPath": layerPath, "layerType": "vpmem", }).Debug("Added LCOW layer") return uvmPath, nil - } else if err != uvmpkg.ErrNoAvailableLocation && err != uvmpkg.ErrMaxVPMemLayerSize { + } else if err != uvm.ErrNoAvailableLocation && err != uvm.ErrMaxVPMemLayerSize { return "", fmt.Errorf("failed to add VPMEM layer: %s", err) } } options := []string{"ro"} - uvmPath = fmt.Sprintf(uvmpkg.LCOWGlobalMountPrefix, uvm.UVMMountCounter()) - sm, err := uvm.AddSCSI(ctx, layerPath, uvmPath, true, options, uvmpkg.VMAccessTypeNoop) + uvmPath = fmt.Sprintf(uvm.LCOWGlobalMountPrefix, vm.UVMMountCounter()) + sm, err := vm.AddSCSI(ctx, layerPath, uvmPath, true, options, uvm.VMAccessTypeNoop) if err != nil { return "", fmt.Errorf("failed to add SCSI layer: %s", err) } @@ -292,17 +291,17 @@ func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) return sm.UVMPath, nil } -func removeLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) error { +func removeLCOWLayer(ctx context.Context, vm *uvm.UtilityVM, layerPath string) error { // Assume it was added to vPMEM and fall back to SCSI - err := uvm.RemoveVPMem(ctx, layerPath) + err := vm.RemoveVPMem(ctx, layerPath) if err == nil { log.G(ctx).WithFields(logrus.Fields{ "layerPath": layerPath, "layerType": "vpmem", }).Debug("Removed LCOW layer") return nil - } else if err == uvmpkg.ErrNotAttached { - err = uvm.RemoveSCSI(ctx, layerPath) + } else if err == uvm.ErrNotAttached { + err = vm.RemoveSCSI(ctx, layerPath) if err == nil { log.G(ctx).WithFields(logrus.Fields{ "layerPath": layerPath, @@ -331,9 +330,9 @@ const ( ) // UnmountContainerLayers is a helper for clients to hide all the complexity of layer unmounting -func UnmountContainerLayers(ctx context.Context, layerFolders []string, containerRootPath, volumeMountPath string, uvm *uvmpkg.UtilityVM, op UnmountOperation) error { +func UnmountContainerLayers(ctx context.Context, layerFolders []string, containerRootPath, volumeMountPath string, vm *uvm.UtilityVM, op UnmountOperation) error { log.G(ctx).WithField("layerFolders", layerFolders).Debug("hcsshim::unmountContainerLayers") - if uvm == nil { + if vm == nil { // Must be an argon - folders are mounted on the host if op != UnmountOperationAll { return errors.New("only operation supported for host-mounted folders is unmountOperationAll") @@ -367,13 +366,13 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe // Always remove the combined layers as they are part of scsi/vsmb/vpmem // removals. 
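// Aside: addLCOWLayer above encodes a two-tier placement policy: try vPMEM first, and fall
// back to SCSI only when vPMEM is full (ErrNoAvailableLocation) or the layer is too large
// for it (ErrMaxVPMemLayerSize); any other vPMEM error aborts. A condensed sketch of that
// control flow with the device operations stubbed out (names here are illustrative):

package example

import (
	"errors"
	"fmt"
)

var (
	errNoAvailableLocation = errors.New("no available location")
	errMaxVPMemLayerSize   = errors.New("layer too large for vpmem")
)

func addLayer(addVPMem, addSCSI func() error) error {
	err := addVPMem()
	if err == nil {
		return nil // fast path: vPMEM attach succeeded
	}
	// only capacity-style failures fall through to SCSI; anything else is fatal
	if err != errNoAvailableLocation && err != errMaxVPMemLayerSize {
		return fmt.Errorf("failed to add VPMEM layer: %w", err)
	}
	return addSCSI()
}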
- if uvm.OS() == "windows" { - if err := uvm.RemoveCombinedLayersWCOW(ctx, containerRootPath); err != nil { + if vm.OS() == "windows" { + if err := vm.RemoveCombinedLayersWCOW(ctx, containerRootPath); err != nil { log.G(ctx).WithError(err).Warn("failed guest request to remove combined layers") retError = err } } else { - if err := uvm.RemoveCombinedLayersLCOW(ctx, containerRootPath); err != nil { + if err := vm.RemoveCombinedLayersLCOW(ctx, containerRootPath); err != nil { log.G(ctx).WithError(err).Warn("failed guest request to remove combined layers") retError = err } @@ -385,7 +384,7 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe if err != nil { return errors.Wrap(err, "failed to get scratch VHD path in layer folders") } - if err := uvm.RemoveSCSI(ctx, hostScratchFile); err != nil { + if err := vm.RemoveSCSI(ctx, hostScratchFile); err != nil { log.G(ctx).WithError(err).Warn("failed to remove scratch") if retError == nil { retError = err @@ -398,9 +397,9 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe // Remove each of the read-only layers from VSMB. These are ref-counted and // only removed once the count drops to zero. This allows multiple containers // to share layers. - if uvm.OS() == "windows" && (op&UnmountOperationVSMB) == UnmountOperationVSMB { + if vm.OS() == "windows" && (op&UnmountOperationVSMB) == UnmountOperationVSMB { for _, layerPath := range layerFolders[:len(layerFolders)-1] { - if e := uvm.RemoveVSMB(ctx, layerPath, true); e != nil { + if e := vm.RemoveVSMB(ctx, layerPath, true); e != nil { log.G(ctx).WithError(e).Warn("remove VSMB failed") if retError == nil { retError = e @@ -414,10 +413,10 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe // Remove each of the read-only layers from VPMEM (or SCSI). These are ref-counted // and only removed once the count drops to zero. This allows multiple containers to // share layers. Note that SCSI is used on large layers.
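// Aside: the comments above describe VSMB and vPMEM layer shares as ref-counted: each
// container mounting a layer bumps the count, and the share is only detached once the last
// user releases it, which is what lets containers share read-only layers. A minimal sketch
// of that bookkeeping (illustrative types, not the shim's internals):

package example

type share struct {
	refCount int
	detach   func() error // actually removes the VSMB/vPMEM attachment
}

// release decrements the count and detaches only when no container uses the share anymore.
func (s *share) release() error {
	s.refCount--
	if s.refCount > 0 {
		return nil // another container still has this layer mounted
	}
	return s.detach()
}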
- if uvm.OS() == "linux" && (op&UnmountOperationVPMEM) == UnmountOperationVPMEM { + if vm.OS() == "linux" && (op&UnmountOperationVPMEM) == UnmountOperationVPMEM { for _, layerPath := range layerFolders[:len(layerFolders)-1] { hostPath := filepath.Join(layerPath, "layer.vhd") - if err := removeLCOWLayer(ctx, uvm, hostPath); err != nil { + if err := removeLCOWLayer(ctx, vm, hostPath); err != nil { log.G(ctx).WithError(err).Warn("remove layer failed") if retError == nil { retError = err @@ -447,11 +446,11 @@ func GetHCSLayers(ctx context.Context, vm *uvm.UtilityVM, paths []string) (layer return layers, nil } -func containerRootfsPath(uvm *uvm.UtilityVM, rootPath string) string { - if uvm.OS() == "windows" { - return ospath.Join(uvm.OS(), rootPath) +func containerRootfsPath(vm *uvm.UtilityVM, rootPath string) string { + if vm.OS() == "windows" { + return ospath.Join(vm.OS(), rootPath) } - return ospath.Join(uvm.OS(), rootPath, uvmpkg.RootfsPath) + return ospath.Join(vm.OS(), rootPath, uvm.RootfsPath) } func getScratchVHDPath(layerFolders []string) (string, error) { diff --git a/internal/regstate/regstate.go b/internal/regstate/regstate.go index 6086c1dc52..dcbc9334d7 100644 --- a/internal/regstate/regstate.go +++ b/internal/regstate/regstate.go @@ -34,11 +34,11 @@ var localUser = &Key{registry.CURRENT_USER, "HKEY_CURRENT_USER"} var rootPath = `SOFTWARE\Microsoft\runhcs` type NotFoundError struct { - Id string + ID string } func (err *NotFoundError) Error() string { - return fmt.Sprintf("ID '%s' was not found", err.Id) + return fmt.Sprintf("ID '%s' was not found", err.ID) } func IsNotFoundError(err error) bool { diff --git a/internal/tools/securitypolicy/main.go b/internal/tools/securitypolicy/main.go index 6c765cf222..926a91bc7e 100644 --- a/internal/tools/securitypolicy/main.go +++ b/internal/tools/securitypolicy/main.go @@ -19,9 +19,9 @@ import ( var ( configFile = flag.String("c", "", "config") - outputJson = flag.Bool("j", false, "json") - username = flag.String("u", "", "username") - password = flag.String("p", "", "password") + outputJSON = flag.Bool("j", false, "json") + username = flag.String("u", "", "username") + password = flag.String("p", "", "password") ) func main() { @@ -63,7 +63,7 @@ func main() { if err != nil { return err } - if *outputJson { + if *outputJSON { fmt.Printf("%s\n", j) } b := base64.StdEncoding.EncodeToString(j) @@ -124,7 +124,7 @@ func createPolicyFromConfig(config Config) (sp.SecurityPolicy, error) { } ref, err := name.ParseReference(image.Name) if err != nil { - return p, fmt.Errorf("'%s' isn't a valid image name\n", image.Name) + return p, fmt.Errorf("'%s' isn't a valid image name", image.Name) } img, err := remote.Image(ref, imageOptions...) if err != nil { @@ -168,8 +168,8 @@ func createPolicyFromConfig(config Config) (sp.SecurityPolicy, error) { return p, err } hash := dmverity.RootHash(tree) - hash_string := fmt.Sprintf("%x", hash) - container.Layers = append(container.Layers, hash_string) + hashString := fmt.Sprintf("%x", hash) + container.Layers = append(container.Layers, hashString) } p.Containers = append(p.Containers, container) diff --git a/internal/uvm/clone.go b/internal/uvm/clone.go index 9cc574612a..4e2f95a148 100644 --- a/internal/uvm/clone.go +++ b/internal/uvm/clone.go @@ -12,7 +12,7 @@ import ( const ( hcsComputeSystemSaveType = "AsTemplate" // default namespace ID used for all template and clone VMs. 
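// Aside: the rename that follows (DEFAULT_CLONE_NETWORK_NAMESPACE_ID ->
// DefaultCloneNetworkNamespaceID) is exactly what stylecheck's ST1003 asks for: Go constants
// use MixedCaps rather than SCREAMING_SNAKE_CASE, and initialisms such as ID stay fully
// capitalized. In miniature (the constant here is illustrative):

package example

// const DEFAULT_CLONE_ID = "89EB8A86" // flagged by ST1003: underscores and all caps
const DefaultCloneID = "89EB8A86" // idiomatic: MixedCaps, initialism ID kept upper-case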
- DEFAULT_CLONE_NETWORK_NAMESPACE_ID = "89EB8A86-E253-41FD-9800-E6D88EB2E18A" + DefaultCloneNetworkNamespaceID = "89EB8A86-E253-41FD-9800-E6D88EB2E18A" ) // Cloneable is a generic interface for cloning a specific resource. Not all resources can diff --git a/internal/uvm/combine_layers.go b/internal/uvm/combine_layers.go index 5971cc49dc..ff85fcffa5 100644 --- a/internal/uvm/combine_layers.go +++ b/internal/uvm/combine_layers.go @@ -35,7 +35,7 @@ func (uvm *UtilityVM) CombineLayersWCOW(ctx context.Context, layerPaths []hcssch // // NOTE: `layerPaths`, `scrathPath`, and `rootfsPath` are paths from within the // UVM. -func (uvm *UtilityVM) CombineLayersLCOW(ctx context.Context, containerId string, layerPaths []string, scratchPath, rootfsPath string) error { +func (uvm *UtilityVM) CombineLayersLCOW(ctx context.Context, containerID string, layerPaths []string, scratchPath, rootfsPath string) error { if uvm.operatingSystem != "linux" { return errNotSupported } @@ -49,7 +49,7 @@ func (uvm *UtilityVM) CombineLayersLCOW(ctx context.Context, containerId string, ResourceType: guestrequest.ResourceTypeCombinedLayers, RequestType: requesttype.Add, Settings: guestrequest.LCOWCombinedLayers{ - ContainerID: containerId, + ContainerID: containerID, ContainerRootPath: rootfsPath, Layers: layers, ScratchPath: scratchPath, diff --git a/internal/uvm/create.go b/internal/uvm/create.go index 6f7f844462..994f96beac 100644 --- a/internal/uvm/create.go +++ b/internal/uvm/create.go @@ -152,13 +152,13 @@ func verifyOptions(ctx context.Context, options interface{}) error { return errors.New("at least 2 LayerFolders must be supplied") } if opts.IsClone && !verifyCloneUvmCreateOpts(&opts.TemplateConfig.CreateOpts, opts) { - return errors.New("clone configuration doesn't match with template configuration.") + return errors.New("clone configuration doesn't match with template configuration") } if opts.IsClone && opts.TemplateConfig == nil { return errors.New("template config can not be nil when creating clone") } if opts.IsTemplate && opts.FullyPhysicallyBacked { - return errors.New("Template can not be created from a full physically backed UVM") + return errors.New("template can not be created from a full physically backed UVM") } } return nil diff --git a/internal/uvm/create_wcow.go b/internal/uvm/create_wcow.go index 8ddd3c3d2e..b00b0feee3 100644 --- a/internal/uvm/create_wcow.go +++ b/internal/uvm/create_wcow.go @@ -316,7 +316,7 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error if uvm.namespaces == nil { uvm.namespaces = make(map[string]*namespaceInfo) } - uvm.namespaces[DEFAULT_CLONE_NETWORK_NAMESPACE_ID] = &namespaceInfo{ + uvm.namespaces[DefaultCloneNetworkNamespaceID] = &namespaceInfo{ nics: make(map[string]*nicInfo), } uvm.IsClone = true diff --git a/internal/uvm/network.go b/internal/uvm/network.go index fcec86de66..9e0a56dccc 100644 --- a/internal/uvm/network.go +++ b/internal/uvm/network.go @@ -43,7 +43,7 @@ var ( func (uvm *UtilityVM) SetupNetworkNamespace(ctx context.Context, nsid string) error { nsidInsideUVM := nsid if uvm.IsTemplate || uvm.IsClone { - nsidInsideUVM = DEFAULT_CLONE_NETWORK_NAMESPACE_ID + nsidInsideUVM = DefaultCloneNetworkNamespaceID } // Query endpoints with actual nsid diff --git a/internal/uvm/scsi.go b/internal/uvm/scsi.go index 2e0e72934b..1b4e905f00 100644 --- a/internal/uvm/scsi.go +++ b/internal/uvm/scsi.go @@ -507,7 +507,7 @@ func (sm *SCSIMount) GobDecode(data []byte) error { return fmt.Errorf(errMsgFmt, err) } if sm.serialVersionID != 
scsiCurrentSerialVersionID { - return fmt.Errorf("Serialized version of SCSIMount: %d doesn't match with the current version: %d", sm.serialVersionID, scsiCurrentSerialVersionID) + return fmt.Errorf("serialized version of SCSIMount: %d doesn't match with the current version: %d", sm.serialVersionID, scsiCurrentSerialVersionID) } if err := decoder.Decode(&sm.HostPath); err != nil { return fmt.Errorf(errMsgFmt, err) diff --git a/internal/uvm/vsmb.go b/internal/uvm/vsmb.go index 27b20f3675..11d5a89b37 100644 --- a/internal/uvm/vsmb.go +++ b/internal/uvm/vsmb.go @@ -362,7 +362,7 @@ func (vsmb *VSMBShare) GobDecode(data []byte) error { return fmt.Errorf(errMsgFmt, err) } if vsmb.serialVersionID != vsmbCurrentSerialVersionID { - return fmt.Errorf("Serialized version of VSMBShare %d doesn't match with the current version %d", vsmb.serialVersionID, vsmbCurrentSerialVersionID) + return fmt.Errorf("serialized version of VSMBShare %d doesn't match with the current version %d", vsmb.serialVersionID, vsmbCurrentSerialVersionID) } if err := decoder.Decode(&vsmb.HostPath); err != nil { return fmt.Errorf(errMsgFmt, err) diff --git a/internal/vm/remotevm/network.go b/internal/vm/remotevm/network.go index ceddeb4752..25a634cb8a 100644 --- a/internal/vm/remotevm/network.go +++ b/internal/vm/remotevm/network.go @@ -21,8 +21,8 @@ func getSwitchID(endpointID, portID string) (string, error) { type ExtraInfo struct { Allocators []struct { - SwitchId string - EndpointPortGuid string + SwitchID string `json:"SwitchId"` + EndpointPortGUID string `json:"EndpointPortGuid"` } } @@ -40,8 +40,8 @@ func getSwitchID(endpointID, portID string) (string, error) { // that actually contains a switch ID and that has the matching port GUID we made earlier. var switchID string for _, allocator := range exi.Allocators { - if allocator.SwitchId != "" && strings.ToLower(allocator.EndpointPortGuid) == portID { - switchID = allocator.SwitchId + if allocator.SwitchID != "" && strings.ToLower(allocator.EndpointPortGUID) == portID { + switchID = allocator.SwitchID break } } diff --git a/internal/wclayer/legacy.go b/internal/wclayer/legacy.go index 83ba72cfad..b7f3064f26 100644 --- a/internal/wclayer/legacy.go +++ b/internal/wclayer/legacy.go @@ -76,7 +76,7 @@ func readTombstones(path string) (map[string]([]string), error) { defer tf.Close() s := bufio.NewScanner(tf) if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { - return nil, errors.New("Invalid tombstones file") + return nil, errors.New("invalid tombstones file") } ts := make(map[string]([]string)) diff --git a/pkg/securitypolicy/securitypolicy_test.go b/pkg/securitypolicy/securitypolicy_test.go index cf948ae202..6eccecd4e7 100644 --- a/pkg/securitypolicy/securitypolicy_test.go +++ b/pkg/securitypolicy/securitypolicy_test.go @@ -113,7 +113,7 @@ func Test_EnforceOverlayMountPolicy_No_Matches(t *testing.T) { } r := rand.New(rand.NewSource(time.Now().UnixNano())) - containerID := generateContainerId(r) + containerID := generateContainerID(r) container := selectContainerFromPolicy(p, r) layerPaths, err := createInvalidOverlayForContainer(policy, container, r) @@ -143,7 +143,7 @@ func Test_EnforceOverlayMountPolicy_Matches(t *testing.T) { } r := rand.New(rand.NewSource(time.Now().UnixNano())) - containerID := generateContainerId(r) + containerID := generateContainerID(r) container := selectContainerFromPolicy(p, r) layerPaths, err := createValidOverlayForContainer(policy, container, r) @@ -172,7 +172,7 @@ func Test_EnforceOverlayMountPolicy_Overlay_Single_Container_Twice(t 
*testing.T) t.Fatalf("expected nil error got: %v", err) } - containerID := generateContainerId(r) + containerID := generateContainerID(r) container := selectContainerFromPolicy(p, r) layerPaths, err := createValidOverlayForContainer(policy, container, r) @@ -230,7 +230,7 @@ func Test_EnforceOverlayMountPolicy_Multiple_Instances_Same_Container(t *testing idUnique := false var id string for idUnique == false { - id = generateContainerId(r) + id = generateContainerID(r) _, found := idsUsed[id] idUnique = !found idsUsed[id] = true @@ -261,8 +261,8 @@ func Test_EnforceOverlayMountPolicy_Overlay_Single_Container_Twice_With_Differen var containerIDOne, containerIDTwo string for containerIDOne == containerIDTwo { - containerIDOne = generateContainerId(r) - containerIDTwo = generateContainerId(r) + containerIDOne = generateContainerID(r) + containerIDTwo = generateContainerID(r) } container := selectContainerFromPolicy(p, r) @@ -290,7 +290,7 @@ func Test_EnforceCommandPolicy_Matches(t *testing.T) { } r := rand.New(rand.NewSource(time.Now().UnixNano())) - containerID := generateContainerId(r) + containerID := generateContainerID(r) container := selectContainerFromPolicy(p, r) layerPaths, err := createValidOverlayForContainer(policy, container, r) @@ -322,7 +322,7 @@ func Test_EnforceCommandPolicy_NoMatches(t *testing.T) { } r := rand.New(rand.NewSource(time.Now().UnixNano())) - containerID := generateContainerId(r) + containerID := generateContainerID(r) container := selectContainerFromPolicy(p, r) layerPaths, err := createValidOverlayForContainer(policy, container, r) @@ -372,14 +372,14 @@ func Test_EnforceCommandPolicy_NarrowingMatches(t *testing.T) { return false } - testContainerOneId := "" - testContainerTwoId := "" + testcontainerOneID := "" + testcontainerTwoID := "" indexForContainerOne := -1 indexForContainerTwo := -1 // mount and overlay all our containers for index, container := range p.Containers { - containerID := generateContainerId(r) + containerID := generateContainerID(r) layerPaths, err := createValidOverlayForContainer(policy, container, r) if err != nil { @@ -392,11 +392,11 @@ func Test_EnforceCommandPolicy_NarrowingMatches(t *testing.T) { } if cmp.Equal(container, testContainerOne) { - testContainerOneId = containerID + testcontainerOneID = containerID indexForContainerOne = index } if cmp.Equal(container, testContainerTwo) { - testContainerTwoId = containerID + testcontainerTwoID = containerID indexForContainerTwo = index } } @@ -407,7 +407,7 @@ func Test_EnforceCommandPolicy_NarrowingMatches(t *testing.T) { return false } for _, id := range containerOneMapping { - if (id != testContainerOneId) && (id != testContainerTwoId) { + if (id != testcontainerOneID) && (id != testcontainerTwoID) { return false } } @@ -417,14 +417,14 @@ func Test_EnforceCommandPolicy_NarrowingMatches(t *testing.T) { return false } for _, id := range containerTwoMapping { - if (id != testContainerOneId) && (id != testContainerTwoId) { + if (id != testcontainerOneID) && (id != testcontainerTwoID) { return false } } // enforce command policy for containerOne // this will narrow our list of possible ids down - err = policy.EnforceCommandPolicy(testContainerOneId, testContainerOne.Command) + err = policy.EnforceCommandPolicy(testcontainerOneID, testContainerOne.Command) if err != nil { return false } @@ -436,7 +436,7 @@ func Test_EnforceCommandPolicy_NarrowingMatches(t *testing.T) { return false } for _, id := range updatedMapping { - if id != testContainerTwoId { + if id != testcontainerTwoID { return false 
} } @@ -517,7 +517,7 @@ func selectRootHashFromPolicy(policy *SecurityPolicy, r *rand.Rand) string { return container.Layers[r.Intn(numberOfLayersInContainer)] } -func generateContainerId(r *rand.Rand) string { +func generateContainerID(r *rand.Rand) string { id := atLeastOneAtMost(r, maxGeneratedContainerID) return strconv.FormatInt(int64(id), 10) } diff --git a/pkg/securitypolicy/securitypolicyenforcer.go b/pkg/securitypolicy/securitypolicyenforcer.go index d8eb86daa3..a3513009e4 100644 --- a/pkg/securitypolicy/securitypolicyenforcer.go +++ b/pkg/securitypolicy/securitypolicyenforcer.go @@ -126,7 +126,7 @@ func (policyState *StandardSecurityPolicyEnforcer) EnforcePmemMountPolicy(target } if deviceHash == "" { - return errors.New("device is missing verity root hash.") + return errors.New("device is missing verity root hash") } found := false diff --git a/test/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/test/vendor/github.com/Microsoft/hcsshim/.golangci.yml new file mode 100644 index 0000000000..16b25be554 --- /dev/null +++ b/test/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -0,0 +1,96 @@ +linters: + enable: + - stylecheck + +linters-settings: + stylecheck: + # https://staticcheck.io/docs/checks + checks: ["all"] + + +issues: + # This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like + # (screaming case Windows api constants for example). There's also some structs that we *could* change the initialisms to be Go + # friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't + # supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms, + # while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change. 
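// Aside: the comment above notes that renaming exported Id fields to ID would be a breaking
// change. For JSON-serialized structs there is a middle ground, used by the remotevm hunk
// earlier in this diff: rename the Go field to the idiomatic initialism and pin the original
// wire name with a struct tag, so the encoded form is unchanged. Sketch:

package example

import "encoding/json"

type Allocator struct {
	SwitchID         string `json:"SwitchId"` // Go-idiomatic field name, original JSON key
	EndpointPortGUID string `json:"EndpointPortGuid"`
}

// roundTrip still reads and writes the legacy keys: json.Marshal emits "SwitchId" and
// "EndpointPortGuid" because the tags, not the field names, define the wire format.
func roundTrip(data []byte) ([]byte, error) {
	var a Allocator
	if err := json.Unmarshal(data, &a); err != nil {
		return nil, err
	}
	return json.Marshal(a)
}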
+ exclude-rules: + - path: layer.go + linters: + - stylecheck + Text: "ST1003:" + + - path: hcsshim.go + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema2\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\wclayer\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: hcn\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema1\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hns\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\compactext4\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\format\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guestrequest\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guest\\prot\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\windevice\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\winapi\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\vmcompute\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\regstate\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcserror\\ + linters: + - stylecheck + Text: "ST1003:" \ No newline at end of file diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go index 9f6317cc05..df3a59a78c 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go +++ b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go @@ -122,12 +122,15 @@ func defaultQuery() HostComputeQuery { // PlatformDoesNotSupportError happens when users are attempting to use a newer shim on an older OS func platformDoesNotSupportError(featureName string) error { - return fmt.Errorf("Platform does not support feature %s", featureName) + return fmt.Errorf("platform does not support feature %s", featureName) } // V2ApiSupported returns an error if the HCN version does not support the V2 Apis. func V2ApiSupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.Api.V2 { return nil } @@ -143,7 +146,10 @@ func V2SchemaVersion() SchemaVersion { // RemoteSubnetSupported returns an error if the HCN version does not support Remote Subnet policies. func RemoteSubnetSupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.RemoteSubnet { return nil } @@ -152,7 +158,10 @@ func RemoteSubnetSupported() error { // HostRouteSupported returns an error if the HCN version does not support Host Route policies. func HostRouteSupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.HostRoute { return nil } @@ -161,7 +170,10 @@ func HostRouteSupported() error { // DSRSupported returns an error if the HCN version does not support Direct Server Return. func DSRSupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.DSR { return nil } @@ -170,7 +182,10 @@ func DSRSupported() error { // Slash32EndpointPrefixesSupported returns an error if the HCN version does not support configuring endpoints with /32 prefixes. 
func Slash32EndpointPrefixesSupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.Slash32EndpointPrefixes { return nil } @@ -179,7 +194,10 @@ func Slash32EndpointPrefixesSupported() error { // AclSupportForProtocol252Supported returns an error if the HCN version does not support HNS ACL Policies to support protocol 252 for VXLAN. func AclSupportForProtocol252Supported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.AclSupportForProtocol252 { return nil } @@ -188,7 +206,10 @@ func AclSupportForProtocol252Supported() error { // SessionAffinitySupported returns an error if the HCN version does not support Session Affinity. func SessionAffinitySupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.SessionAffinity { return nil } @@ -197,7 +218,10 @@ func SessionAffinitySupported() error { // IPv6DualStackSupported returns an error if the HCN version does not support IPv6DualStack. func IPv6DualStackSupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.IPv6DualStack { return nil } @@ -206,7 +230,10 @@ func IPv6DualStackSupported() error { // L4proxyPolicySupported returns an error if the HCN version does not support L4Proxy func L4proxyPolicySupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.L4Proxy { return nil } @@ -215,7 +242,10 @@ func L4proxyPolicySupported() error { // L4WfpProxyPolicySupported returns an error if the HCN version does not support L4WfpProxy func L4WfpProxyPolicySupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.L4WfpProxy { return nil } @@ -224,7 +254,10 @@ func L4WfpProxyPolicySupported() error { // SetPolicySupported returns an error if the HCN version does not support SetPolicy. func SetPolicySupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.SetPolicy { return nil } @@ -233,7 +266,10 @@ func SetPolicySupported() error { // VxlanPortSupported returns an error if the HCN version does not support configuring the VXLAN TCP port. func VxlanPortSupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.VxlanPort { return nil } @@ -242,7 +278,10 @@ func VxlanPortSupported() error { // TierAclPolicySupported returns an error if the HCN version does not support configuring the TierAcl.
func TierAclPolicySupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.TierAcl { return nil } @@ -251,7 +290,10 @@ func TierAclPolicySupported() error { // NetworkACLPolicySupported returns an error if the HCN version does not support NetworkACLPolicy func NetworkACLPolicySupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.NetworkACL { return nil } @@ -260,14 +302,16 @@ func NetworkACLPolicySupported() error { // NestedIpSetSupported returns an error if the HCN version does not support NestedIpSet func NestedIpSetSupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.NestedIpSet { return nil } return platformDoesNotSupportError("NestedIpSet") } - // RequestType are the different operations performed to settings. // Used to update the settings of Endpoint/Namespace objects. type RequestType string diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go index d6d27079bc..52e2498462 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go +++ b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go @@ -137,7 +137,7 @@ func AddRoute(endpoints []HostComputeEndpoint, destinationPrefix string, nextHop logrus.Debugf("hcn::HostComputeRoute::AddRoute endpointId=%v, destinationPrefix=%v, nextHop=%v, needEncapsulation=%v", endpoints, destinationPrefix, nextHop, needEncapsulation) if len(endpoints) <= 0 { - return nil, errors.New("Missing endpoints") + return nil, errors.New("missing endpoints") } route := &HostComputeRoute{ diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go index 4be8df17db..bacb91feda 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go +++ b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go @@ -4,10 +4,17 @@ import ( "fmt" "sync" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -var versionOnce sync.Once +var ( + // featuresOnce guards the one-time query of the supported features (and the log line that reports them), + // avoiding redundant work on repeated checks. + featuresOnce sync.Once + featuresErr error + supportedFeatures SupportedFeatures +) // SupportedFeatures are the features provided by the Service. type SupportedFeatures struct { @@ -43,17 +50,41 @@ type ApiSupport struct { V2 bool `json:"V2"` } +// GetCachedSupportedFeatures returns the features supported by the Service and an error if the query failed. If this has been called +// before it will return the supported features and error received from the first call. This can be used to optimize if many calls to the +// various hcn.IsXSupported methods need to be made. +func GetCachedSupportedFeatures() (SupportedFeatures, error) { + // Only query the HCN version and supported features once, instead of every time this is invoked. The logs are useful to + // debug incidents where there's confusion about whether a feature is supported on the host machine. The sync.Once helps to avoid + // redundant spam of these logs any time a check is made for an HCN feature. This is a common occurrence in kube-proxy + // for example.
+ featuresOnce.Do(func() { + supportedFeatures, featuresErr = getSupportedFeatures() + }) + + return supportedFeatures, featuresErr +} + // GetSupportedFeatures returns the features supported by the Service. +// +// Deprecated: Use GetCachedSupportedFeatures instead. func GetSupportedFeatures() SupportedFeatures { - var features SupportedFeatures - - globals, err := GetGlobals() + features, err := GetCachedSupportedFeatures() if err != nil { // Expected on pre-1803 builds, all features will be false/unsupported - logrus.Debugf("Unable to obtain globals: %s", err) + logrus.WithError(err).Error("unable to obtain supported features") return features } + return features +} +func getSupportedFeatures() (SupportedFeatures, error) { + var features SupportedFeatures + globals, err := GetGlobals() + if err != nil { + // If this fails once it is expected to always fail; it fails on pre-1803 builds, for example. + return SupportedFeatures{}, errors.Wrap(err, "failed to query HCN version number: this is expected on pre-1803 builds") + } features.Acl = AclFeatures{ AclAddressLists: isFeatureSupported(globals.Version, HNSVersion1803), AclNoHostRulePriority: isFeatureSupported(globals.Version, HNSVersion1803), @@ -81,18 +112,12 @@ func GetSupportedFeatures() SupportedFeatures { features.NetworkACL = isFeatureSupported(globals.Version, NetworkACLPolicyVersion) features.NestedIpSet = isFeatureSupported(globals.Version, NestedIpSetVersion) - // Only print the HCN version and features supported once, instead of everytime this is invoked. These logs are useful to - // debug incidents where there's confusion on if a feature is supported on the host machine. The sync.Once helps to avoid redundant - // spam of these anytime a check needs to be made for if an HCN feature is supported. This is a common occurrence in kubeproxy - // for example. - versionOnce.Do(func() { - logrus.WithFields(logrus.Fields{ - "version": fmt.Sprintf("%+v", globals.Version), - "supportedFeatures": fmt.Sprintf("%+v", features), - }).Info("HCN feature check") - }) + logrus.WithFields(logrus.Fields{ + "version": fmt.Sprintf("%+v", globals.Version), + "supportedFeatures": fmt.Sprintf("%+v", features), + }).Info("HCN feature check") - return features + return features, nil } func isFeatureSupported(currentVersion Version, versionsSupported VersionRanges) bool { diff --git a/test/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/test/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go index 408312672e..9e0059447d 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go +++ b/test/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go @@ -7,6 +7,9 @@ import ( // HNSEndpoint represents a network endpoint in HNS type HNSEndpoint = hns.HNSEndpoint +// HNSEndpointStats represents the stats for a network endpoint in HNS +type HNSEndpointStats = hns.EndpointStats + // Namespace represents a Compartment.
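// Aside: the hcnsupport.go change above memoizes both the result and the error of the
// expensive HCN feature query behind a sync.Once, so repeated Supported() checks (common in
// kube-proxy) trigger one query and one log line in total. The shape of the pattern,
// reduced to its essentials (types here are illustrative):

package example

import "sync"

type features struct{ V2 bool }

var (
	once      sync.Once
	cached    features
	cachedErr error
)

// getCached is safe for concurrent use: the first caller runs query, later callers block
// until it finishes and then observe the same memoized (features, error) pair. The error is
// cached too, matching the diff's assumption that a failed query will keep failing.
func getCached(query func() (features, error)) (features, error) {
	once.Do(func() {
		cached, cachedErr = query()
	})
	return cached, cachedErr
}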
type Namespace = hns.Namespace @@ -108,3 +111,8 @@ func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { return hns.GetHNSEndpointByName(endpointName) } + +// GetHNSEndpointStats gets the endpoint stats by ID +func GetHNSEndpointStats(endpointName string) (*HNSEndpointStats, error) { + return hns.GetHNSEndpointStats(endpointName) +} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go index d1b0a729c1..cd9ca03f7e 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go @@ -135,7 +135,7 @@ func validateContainerConfig(ctx context.Context, coi *createOptionsInternal) er } if coi.HostingSystem != nil && coi.templateID != "" && !coi.HostingSystem.IsClone { - return fmt.Errorf("A container can not be cloned inside a non cloned POD") + return fmt.Errorf("a container can not be cloned inside a non cloned POD") } if coi.templateID != "" { @@ -152,11 +152,11 @@ func validateContainerConfig(ctx context.Context, coi *createOptionsInternal) er if coi.HostingSystem != nil && coi.HostingSystem.IsTemplate { if len(coi.Spec.Windows.Devices) != 0 { - return fmt.Errorf("Mapped Devices are not supported for template containers") + return fmt.Errorf("mapped Devices are not supported for template containers") } if _, ok := coi.Spec.Windows.CredentialSpec.(string); ok { - return fmt.Errorf("gMSA specifications are not supported for template containers") + return fmt.Errorf("gmsa specifications are not supported for template containers") } if coi.Spec.Windows.Servicing { @@ -179,7 +179,7 @@ func initializeCreateOptions(ctx context.Context, createOptions *CreateOptions) } if coi.Spec == nil { - return nil, fmt.Errorf("Spec must be supplied") + return nil, fmt.Errorf("spec must be supplied") } // Defaults if omitted by caller. diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go index af9e6deb38..f27b5da27e 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go @@ -269,7 +269,7 @@ func createWindowsContainerDocument(ctx context.Context, coi *createOptionsInter // Use the reserved network namespace for containers created inside // cloned or template UVMs. 
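// Aside: a hedged usage sketch for the stats API added above, assuming the root-package
// wrapper mirrors the vendored hnsendpoint.go shown in this diff; the endpoint ID argument
// is a placeholder:

package example

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func printEndpointStats(endpointID string) error {
	stats, err := hcsshim.GetHNSEndpointStats(endpointID)
	if err != nil {
		return fmt.Errorf("query stats for endpoint %s: %w", endpointID, err)
	}
	fmt.Printf("rx=%d bytes tx=%d bytes dropped(in/out)=%d/%d\n",
		stats.BytesReceived, stats.BytesSent,
		stats.DroppedPacketsIncoming, stats.DroppedPacketsOutgoing)
	return nil
}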
if coi.HostingSystem != nil && (coi.HostingSystem.IsTemplate || coi.HostingSystem.IsClone) { - v2Container.Networking.Namespace = uvm.DEFAULT_CLONE_NETWORK_NAMESPACE_ID + v2Container.Networking.Namespace = uvm.DefaultCloneNetworkNamespaceID } else { v2Container.Networking.Namespace = coi.actualNetworkNamespace } diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go index 6f899c0d05..7cf954c7b2 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go @@ -31,6 +31,7 @@ type HNSEndpoint struct { EnableLowMetric bool `json:",omitempty"` Namespace *Namespace `json:",omitempty"` EncapOverhead uint16 `json:",omitempty"` + SharedContainers []string `json:",omitempty"` } //SystemType represents the type of the system on which actions are done @@ -58,6 +59,18 @@ type EndpointResquestResponse struct { Error string } +// EndpointStats is the object that has stats for a given endpoint +type EndpointStats struct { + BytesReceived uint64 `json:"BytesReceived"` + BytesSent uint64 `json:"BytesSent"` + DroppedPacketsIncoming uint64 `json:"DroppedPacketsIncoming"` + DroppedPacketsOutgoing uint64 `json:"DroppedPacketsOutgoing"` + EndpointID string `json:"EndpointId"` + InstanceID string `json:"InstanceId"` + PacketsReceived uint64 `json:"PacketsReceived"` + PacketsSent uint64 `json:"PacketsSent"` +} + // HNSEndpointRequest makes a HNS call to modify/query a network endpoint func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { endpoint := &HNSEndpoint{} @@ -80,11 +93,27 @@ func HNSListEndpointRequest() ([]HNSEndpoint, error) { return endpoint, nil } +// hnsEndpointStatsRequest makes a HNS call to query the stats for a given endpoint ID +func hnsEndpointStatsRequest(id string) (*EndpointStats, error) { + var stats EndpointStats + err := hnsCall("GET", "/endpointstats/"+id, "", &stats) + if err != nil { + return nil, err + } + + return &stats, nil } + // GetHNSEndpointByID get the Endpoint by ID func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { return HNSEndpointRequest("GET", endpointID, "") } +// GetHNSEndpointStats gets the stats for an endpoint by ID +func GetHNSEndpointStats(endpointID string) (*EndpointStats, error) { + return hnsEndpointStatsRequest(endpointID) +} + // GetHNSEndpointByName gets the endpoint filtered by Name func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { hnsResponse, err := HNSListEndpointRequest() diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go b/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go index 6b69ce9bbe..146f9accf2 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go @@ -8,13 +8,13 @@ import ( "fmt" "os" "path/filepath" + "time" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/ospath" "github.com/Microsoft/hcsshim/internal/uvm" - uvmpkg "github.com/Microsoft/hcsshim/internal/uvm" "github.com/Microsoft/hcsshim/internal/wclayer" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -77,10 +77,10 @@ func (layers *ImageLayers) Release(ctx context.Context, all bool) error { // // TODO dcantah: Keep better track of the layers that are
added, don't simply discard the SCSI, VSMB, etc. resource types gotten inside. -func MountContainerLayers(ctx context.Context, containerId string, layerFolders []string, guestRoot string, volumeMountPath string, uvm *uvmpkg.UtilityVM) (_ string, err error) { +func MountContainerLayers(ctx context.Context, containerID string, layerFolders []string, guestRoot string, volumeMountPath string, vm *uvm.UtilityVM) (_ string, err error) { log.G(ctx).WithField("layerFolders", layerFolders).Debug("hcsshim::mountContainerLayers") - if uvm == nil { + if vm == nil { if len(layerFolders) < 2 { return "", errors.New("need at least two layers - base and scratch") } @@ -115,6 +115,9 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders // for ERROR_NOT_READY as well. if hcserr, ok := lErr.(*hcserror.HcsError); ok { if hcserr.Err == windows.ERROR_NOT_READY || hcserr.Err == windows.ERROR_DEVICE_NOT_CONNECTED { + // Sleep for a little before re-attempting. A probable cause for these issues in the first place is events not getting + // reported in time, so it may help to give things a moment to "cool down" or get back to a known state. + time.Sleep(time.Millisecond * 100) continue } } @@ -155,7 +158,7 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders } // V2 UVM - log.G(ctx).WithField("os", uvm.OS()).Debug("hcsshim::mountContainerLayers V2 UVM") + log.G(ctx).WithField("os", vm.OS()).Debug("hcsshim::mountContainerLayers V2 UVM") var ( layersAdded []string @@ -163,15 +166,15 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders ) defer func() { if err != nil { - if uvm.OS() == "windows" { + if vm.OS() == "windows" { for _, l := range layersAdded { - if err := uvm.RemoveVSMB(ctx, l, true); err != nil { + if err := vm.RemoveVSMB(ctx, l, true); err != nil { log.G(ctx).WithError(err).Warn("failed to remove wcow layer on cleanup") } } } else { for _, l := range layersAdded { - if err := removeLCOWLayer(ctx, uvm, l); err != nil { + if err := removeLCOWLayer(ctx, vm, l); err != nil { log.G(ctx).WithError(err).Warn("failed to remove lcow layer on cleanup") } } @@ -181,13 +184,13 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders for _, layerPath := range layerFolders[:len(layerFolders)-1] { log.G(ctx).WithField("layerPath", layerPath).Debug("mounting layer") - if uvm.OS() == "windows" { - options := uvm.DefaultVSMBOptions(true) + if vm.OS() == "windows" { + options := vm.DefaultVSMBOptions(true) options.TakeBackupPrivilege = true - if uvm.IsTemplate { - uvm.SetSaveableVSMBOptions(options, options.ReadOnly) + if vm.IsTemplate { + vm.SetSaveableVSMBOptions(options, options.ReadOnly) } - if _, err := uvm.AddVSMB(ctx, layerPath, options); err != nil { + if _, err := vm.AddVSMB(ctx, layerPath, options); err != nil { return "", fmt.Errorf("failed to add VSMB layer: %s", err) } layersAdded = append(layersAdded, layerPath) @@ -196,7 +199,7 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders layerPath = filepath.Join(layerPath, "layer.vhd") uvmPath string ) - uvmPath, err = addLCOWLayer(ctx, uvm, layerPath) + uvmPath, err = addLCOWLayer(ctx, vm, layerPath) if err != nil { return "", fmt.Errorf("failed to add LCOW layer: %s", err) } @@ -205,7 +208,7 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders } } - containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot) + containerScratchPathInUVM := ospath.Join(vm.OS(), guestRoot) hostPath, err
:= getScratchVHDPath(layerFolders) if err != nil { return "", fmt.Errorf("failed to get scratch VHD path in layer folders: %s", err) @@ -213,7 +216,7 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders log.G(ctx).WithField("hostPath", hostPath).Debug("mounting scratch VHD") var options []string - scsiMount, err := uvm.AddSCSI(ctx, hostPath, containerScratchPathInUVM, false, options, uvmpkg.VMAccessTypeIndividual) + scsiMount, err := vm.AddSCSI(ctx, hostPath, containerScratchPathInUVM, false, options, uvm.VMAccessTypeIndividual) if err != nil { return "", fmt.Errorf("failed to add SCSI scratch VHD: %s", err) } @@ -221,7 +224,7 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders // This handles the case where we want to share a scratch disk for multiple containers instead // of mounting a new one. Pass a unique value for `ScratchPath` to avoid container upper and // work directories colliding in the UVM. - if scsiMount.RefCount() > 1 && uvm.OS() == "linux" { + if scsiMount.RefCount() > 1 && vm.OS() == "linux" { scratchFmt := fmt.Sprintf("container_%s", filepath.Base(containerScratchPathInUVM)) containerScratchPathInUVM = ospath.Join("linux", scsiMount.UVMPath, scratchFmt) } else { @@ -230,26 +233,26 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders defer func() { if err != nil { - if err := uvm.RemoveSCSI(ctx, hostPath); err != nil { + if err := vm.RemoveSCSI(ctx, hostPath); err != nil { log.G(ctx).WithError(err).Warn("failed to remove scratch on cleanup") } } }() var rootfs string - if uvm.OS() == "windows" { + if vm.OS() == "windows" { // Load the filter at the C:\s location calculated above. We pass into this request each of the // read-only layer folders. var layers []hcsschema.Layer - layers, err = GetHCSLayers(ctx, uvm, layersAdded) + layers, err = GetHCSLayers(ctx, vm, layersAdded) if err != nil { return "", err } - err = uvm.CombineLayersWCOW(ctx, layers, containerScratchPathInUVM) + err = vm.CombineLayersWCOW(ctx, layers, containerScratchPathInUVM) rootfs = containerScratchPathInUVM } else { - rootfs = ospath.Join(uvm.OS(), guestRoot, uvmpkg.RootfsPath) - err = uvm.CombineLayersLCOW(ctx, containerId, lcowUvmLayerPaths, containerScratchPathInUVM, rootfs) + rootfs = ospath.Join(vm.OS(), guestRoot, uvm.RootfsPath) + err = vm.CombineLayersLCOW(ctx, containerID, lcowUvmLayerPaths, containerScratchPathInUVM, rootfs) } if err != nil { return "", err @@ -258,26 +261,26 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders return rootfs, nil } -func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) (uvmPath string, err error) { +func addLCOWLayer(ctx context.Context, vm *uvm.UtilityVM, layerPath string) (uvmPath string, err error) { // don't try to add as vpmem when we want additional devices on the uvm to be fully physically backed - if !uvm.DevicesPhysicallyBacked() { + if !vm.DevicesPhysicallyBacked() { // We first try vPMEM and if it is full or the file is too large we // fall back to SCSI. 
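Aside on the new retry sleep above: the 100ms pause gives transient ERROR_NOT_READY / ERROR_DEVICE_NOT_CONNECTED states time to settle before the next attempt. A minimal, self-contained sketch of the bounded retry-with-delay pattern (the sentinel error and retry budget below are hypothetical, not hcsshim APIs):

package main

import (
	"errors"
	"fmt"
	"time"
)

// errNotReady stands in for a transient device error such as
// windows.ERROR_NOT_READY; it is a placeholder for this sketch.
var errNotReady = errors.New("device not ready")

// withRetry retries fn up to attempts times, sleeping briefly between
// tries so transient device-state events have time to settle.
func withRetry(attempts int, delay time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		if !errors.Is(err, errNotReady) {
			return err // permanent failure; don't retry
		}
		time.Sleep(delay)
	}
	return fmt.Errorf("gave up after %d attempts: %w", attempts, err)
}

func main() {
	calls := 0
	err := withRetry(5, 100*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errNotReady // fails twice, then succeeds
		}
		return nil
	})
	fmt.Println(err, "calls:", calls) // <nil> calls: 3
}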
-func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) (uvmPath string, err error) {
+func addLCOWLayer(ctx context.Context, vm *uvm.UtilityVM, layerPath string) (uvmPath string, err error) {
 	// don't try to add as vpmem when we want additional devices on the uvm to be fully physically backed
-	if !uvm.DevicesPhysicallyBacked() {
+	if !vm.DevicesPhysicallyBacked() {
 		// We first try vPMEM and if it is full or the file is too large we
 		// fall back to SCSI.
-		uvmPath, err = uvm.AddVPMem(ctx, layerPath)
+		uvmPath, err = vm.AddVPMem(ctx, layerPath)
 		if err == nil {
 			log.G(ctx).WithFields(logrus.Fields{
 				"layerPath": layerPath,
 				"layerType": "vpmem",
 			}).Debug("Added LCOW layer")
 			return uvmPath, nil
-		} else if err != uvmpkg.ErrNoAvailableLocation && err != uvmpkg.ErrMaxVPMemLayerSize {
+		} else if err != uvm.ErrNoAvailableLocation && err != uvm.ErrMaxVPMemLayerSize {
 			return "", fmt.Errorf("failed to add VPMEM layer: %s", err)
 		}
 	}

 	options := []string{"ro"}
-	uvmPath = fmt.Sprintf(uvmpkg.LCOWGlobalMountPrefix, uvm.UVMMountCounter())
-	sm, err := uvm.AddSCSI(ctx, layerPath, uvmPath, true, options, uvmpkg.VMAccessTypeNoop)
+	uvmPath = fmt.Sprintf(uvm.LCOWGlobalMountPrefix, vm.UVMMountCounter())
+	sm, err := vm.AddSCSI(ctx, layerPath, uvmPath, true, options, uvm.VMAccessTypeNoop)
 	if err != nil {
 		return "", fmt.Errorf("failed to add SCSI layer: %s", err)
 	}
@@ -288,17 +291,17 @@ func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string)
 	return sm.UVMPath, nil
 }

-func removeLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) error {
+func removeLCOWLayer(ctx context.Context, vm *uvm.UtilityVM, layerPath string) error {
 	// Assume it was added to vPMEM and fall back to SCSI
-	err := uvm.RemoveVPMem(ctx, layerPath)
+	err := vm.RemoveVPMem(ctx, layerPath)
 	if err == nil {
 		log.G(ctx).WithFields(logrus.Fields{
 			"layerPath": layerPath,
 			"layerType": "vpmem",
 		}).Debug("Removed LCOW layer")
 		return nil
-	} else if err == uvmpkg.ErrNotAttached {
-		err = uvm.RemoveSCSI(ctx, layerPath)
+	} else if err == uvm.ErrNotAttached {
+		err = vm.RemoveSCSI(ctx, layerPath)
 		if err == nil {
 			log.G(ctx).WithFields(logrus.Fields{
 				"layerPath": layerPath,
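The addLCOWLayer/removeLCOWLayer pair above tries vPMEM first and falls back to SCSI only on the sentinel errors that mean vPMEM cannot take the layer; any other error is surfaced as a real failure. Reduced to its essentials (the sentinels and attach callbacks here are stand-ins, not the real uvm package API):

package main

import (
	"errors"
	"fmt"
)

// Hypothetical sentinels mirroring the role of uvm.ErrNoAvailableLocation
// and uvm.ErrMaxVPMemLayerSize.
var (
	errNoAvailableLocation = errors.New("no available location")
	errMaxLayerSize        = errors.New("layer exceeds vPMEM size limit")
)

// attach tries the fast path first; only the sentinel errors that mean
// "vPMEM can't take this layer" trigger the SCSI fallback. Any other
// error is returned as-is.
func attach(vpmem, scsi func() error) error {
	err := vpmem()
	if err == nil {
		return nil
	}
	if !errors.Is(err, errNoAvailableLocation) && !errors.Is(err, errMaxLayerSize) {
		return fmt.Errorf("failed to add vPMEM layer: %w", err)
	}
	return scsi()
}

func main() {
	err := attach(
		func() error { return errMaxLayerSize }, // vPMEM full / layer too large
		func() error { fmt.Println("used SCSI"); return nil }, // fallback path
	)
	fmt.Println(err) // <nil>
}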
@@ -327,9 +330,9 @@ const (
 )

 // UnmountContainerLayers is a helper for clients to hide all the complexity of layer unmounting
-func UnmountContainerLayers(ctx context.Context, layerFolders []string, containerRootPath, volumeMountPath string, uvm *uvmpkg.UtilityVM, op UnmountOperation) error {
+func UnmountContainerLayers(ctx context.Context, layerFolders []string, containerRootPath, volumeMountPath string, vm *uvm.UtilityVM, op UnmountOperation) error {
 	log.G(ctx).WithField("layerFolders", layerFolders).Debug("hcsshim::unmountContainerLayers")
-	if uvm == nil {
+	if vm == nil {
 		// Must be an argon - folders are mounted on the host
 		if op != UnmountOperationAll {
 			return errors.New("only operation supported for host-mounted folders is unmountOperationAll")
@@ -363,13 +366,13 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe
 	// Always remove the combined layers as they are part of scsi/vsmb/vpmem
 	// removals.
-	if uvm.OS() == "windows" {
-		if err := uvm.RemoveCombinedLayersWCOW(ctx, containerRootPath); err != nil {
+	if vm.OS() == "windows" {
+		if err := vm.RemoveCombinedLayersWCOW(ctx, containerRootPath); err != nil {
 			log.G(ctx).WithError(err).Warn("failed guest request to remove combined layers")
 			retError = err
 		}
 	} else {
-		if err := uvm.RemoveCombinedLayersLCOW(ctx, containerRootPath); err != nil {
+		if err := vm.RemoveCombinedLayersLCOW(ctx, containerRootPath); err != nil {
 			log.G(ctx).WithError(err).Warn("failed guest request to remove combined layers")
 			retError = err
 		}
@@ -381,7 +384,7 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe
 		if err != nil {
 			return errors.Wrap(err, "failed to get scratch VHD path in layer folders")
 		}
-		if err := uvm.RemoveSCSI(ctx, hostScratchFile); err != nil {
+		if err := vm.RemoveSCSI(ctx, hostScratchFile); err != nil {
 			log.G(ctx).WithError(err).Warn("failed to remove scratch")
 			if retError == nil {
 				retError = err
@@ -394,9 +397,9 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe
 	// Remove each of the read-only layers from VSMB. These are ref-counted and
 	// only removed once the count drops to zero. This allows multiple containers
 	// to share layers.
-	if uvm.OS() == "windows" && (op&UnmountOperationVSMB) == UnmountOperationVSMB {
+	if vm.OS() == "windows" && (op&UnmountOperationVSMB) == UnmountOperationVSMB {
 		for _, layerPath := range layerFolders[:len(layerFolders)-1] {
-			if e := uvm.RemoveVSMB(ctx, layerPath, true); e != nil {
+			if e := vm.RemoveVSMB(ctx, layerPath, true); e != nil {
 				log.G(ctx).WithError(e).Warn("remove VSMB failed")
 				if retError == nil {
 					retError = e
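The VSMB removals above are ref-counted: a share is torn down only when the last container referencing it is gone. A toy version of that bookkeeping (not hcsshim's actual implementation) could look like:

package main

import (
	"fmt"
	"sync"
)

// refCounter releases a shared resource only when the last user is done,
// mirroring how VSMB/vPMEM layer shares are ref-counted in the UVM.
type refCounter struct {
	mu     sync.Mutex
	counts map[string]int
}

func (r *refCounter) add(path string) int {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.counts[path]++
	return r.counts[path]
}

// remove decrements the count and reports whether the underlying
// resource should actually be released now.
func (r *refCounter) remove(path string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.counts[path]--
	if r.counts[path] <= 0 {
		delete(r.counts, path)
		return true
	}
	return false
}

func main() {
	rc := &refCounter{counts: map[string]int{}}
	rc.add(`C:\layers\base`) // container 1
	rc.add(`C:\layers\base`) // container 2
	fmt.Println(rc.remove(`C:\layers\base`)) // false - still in use
	fmt.Println(rc.remove(`C:\layers\base`)) // true - release the share
}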
@@ -410,10 +413,10 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe
 	// Remove each of the read-only layers from VPMEM (or SCSI). These are ref-counted
 	// and only removed once the count drops to zero. This allows multiple containers to
 	// share layers. Note that SCSI is used on large layers.
-	if uvm.OS() == "linux" && (op&UnmountOperationVPMEM) == UnmountOperationVPMEM {
+	if vm.OS() == "linux" && (op&UnmountOperationVPMEM) == UnmountOperationVPMEM {
 		for _, layerPath := range layerFolders[:len(layerFolders)-1] {
 			hostPath := filepath.Join(layerPath, "layer.vhd")
-			if err := removeLCOWLayer(ctx, uvm, hostPath); err != nil {
+			if err := removeLCOWLayer(ctx, vm, hostPath); err != nil {
 				log.G(ctx).WithError(err).Warn("remove layer failed")
 				if retError == nil {
 					retError = err
@@ -443,11 +446,11 @@ func GetHCSLayers(ctx context.Context, vm *uvm.UtilityVM, paths []string) (layer
 	return layers, nil
 }

-func containerRootfsPath(uvm *uvm.UtilityVM, rootPath string) string {
-	if uvm.OS() == "windows" {
-		return ospath.Join(uvm.OS(), rootPath)
+func containerRootfsPath(vm *uvm.UtilityVM, rootPath string) string {
+	if vm.OS() == "windows" {
+		return ospath.Join(vm.OS(), rootPath)
 	}
-	return ospath.Join(uvm.OS(), rootPath, uvmpkg.RootfsPath)
+	return ospath.Join(vm.OS(), rootPath, uvm.RootfsPath)
 }

 func getScratchVHDPath(layerFolders []string) (string, error) {
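UnmountContainerLayers drives all of this with an UnmountOperation bitmask (op&UnmountOperationVSMB and so on), so callers can unmount selectively. The flag pattern in a self-contained form, with illustrative constant names rather than the package's own:

package main

import "fmt"

type unmountOp uint

const (
	opSCSI  unmountOp = 1 << iota // 0b001
	opVSMB                        // 0b010
	opVPMEM                       // 0b100
	opAll = opSCSI | opVSMB | opVPMEM
)

// describe shows the op&flag == flag test used to pick which resource
// types a given unmount request covers.
func describe(op unmountOp) {
	if op&opSCSI == opSCSI {
		fmt.Println("unmount SCSI scratch")
	}
	if op&opVSMB == opVSMB {
		fmt.Println("unmount VSMB layer shares")
	}
	if op&opVPMEM == opVPMEM {
		fmt.Println("unmount vPMEM layers")
	}
}

func main() {
	describe(opAll)            // all three resource types
	describe(opVSMB | opVPMEM) // read-only layers only
}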
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/annotations.go b/test/vendor/github.com/Microsoft/hcsshim/internal/oci/annotations.go
index df8ee7b294..d56611162a 100644
--- a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/annotations.go
+++ b/test/vendor/github.com/Microsoft/hcsshim/internal/oci/annotations.go
@@ -166,7 +166,7 @@ const (
 	// AnnotationVPMemNoMultiMapping indicates that we should disable LCOW vpmem layer multi mapping
 	AnnotationVPMemNoMultiMapping = "io.microsoft.virtualmachine.lcow.vpmem.nomultimapping"

-	// AnnotationKernelBootOptions is used to specify kernel options used while booting a linux kernerl
+	// AnnotationKernelBootOptions is used to specify kernel options used while booting a linux kernel
 	AnnotationKernelBootOptions = "io.microsoft.virtualmachine.lcow.kernelbootoptions"

 	// AnnotationStorageQoSBandwidthMaximum indicates the maximum number of bytes per second. If `0`
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go b/test/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go
index 6086c1dc52..dcbc9334d7 100644
--- a/test/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go
+++ b/test/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go
@@ -34,11 +34,11 @@ var localUser = &Key{registry.CURRENT_USER, "HKEY_CURRENT_USER"}
 var rootPath = `SOFTWARE\Microsoft\runhcs`

 type NotFoundError struct {
-	Id string
+	ID string
 }

 func (err *NotFoundError) Error() string {
-	return fmt.Sprintf("ID '%s' was not found", err.Id)
+	return fmt.Sprintf("ID '%s' was not found", err.ID)
 }

 func IsNotFoundError(err error) bool {
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/clone.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/clone.go
index 9cc574612a..4e2f95a148 100644
--- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/clone.go
+++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/clone.go
@@ -12,7 +12,7 @@ import (
 const (
 	hcsComputeSystemSaveType = "AsTemplate"
 	// default namespace ID used for all template and clone VMs.
-	DEFAULT_CLONE_NETWORK_NAMESPACE_ID = "89EB8A86-E253-41FD-9800-E6D88EB2E18A"
+	DefaultCloneNetworkNamespaceID = "89EB8A86-E253-41FD-9800-E6D88EB2E18A"
 )

 // Cloneable is a generic interface for cloning a specific resource. Not all resources can
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/combine_layers.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/combine_layers.go
index 5971cc49dc..ff85fcffa5 100644
--- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/combine_layers.go
+++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/combine_layers.go
@@ -35,7 +35,7 @@ func (uvm *UtilityVM) CombineLayersWCOW(ctx context.Context, layerPaths []hcssch
 //
 // NOTE: `layerPaths`, `scratchPath`, and `rootfsPath` are paths from within the
 // UVM.
-func (uvm *UtilityVM) CombineLayersLCOW(ctx context.Context, containerId string, layerPaths []string, scratchPath, rootfsPath string) error {
+func (uvm *UtilityVM) CombineLayersLCOW(ctx context.Context, containerID string, layerPaths []string, scratchPath, rootfsPath string) error {
 	if uvm.operatingSystem != "linux" {
 		return errNotSupported
 	}
@@ -49,7 +49,7 @@ func (uvm *UtilityVM) CombineLayersLCOW(ctx context.Context, containerId string,
 		ResourceType: guestrequest.ResourceTypeCombinedLayers,
 		RequestType:  requesttype.Add,
 		Settings: guestrequest.LCOWCombinedLayers{
-			ContainerID:       containerId,
+			ContainerID:       containerID,
 			ContainerRootPath: rootfsPath,
 			Layers:            layers,
 			ScratchPath:       scratchPath,
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go
index 6f7f844462..994f96beac 100644
--- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go
+++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go
@@ -152,13 +152,13 @@ func verifyOptions(ctx context.Context, options interface{}) error {
 			return errors.New("at least 2 LayerFolders must be supplied")
 		}
 		if opts.IsClone && !verifyCloneUvmCreateOpts(&opts.TemplateConfig.CreateOpts, opts) {
-			return errors.New("clone configuration doesn't match with template configuration.")
+			return errors.New("clone configuration doesn't match with template configuration")
 		}
 		if opts.IsClone && opts.TemplateConfig == nil {
 			return errors.New("template config can not be nil when creating clone")
 		}
 		if opts.IsTemplate && opts.FullyPhysicallyBacked {
-			return errors.New("Template can not be created from a full physically backed UVM")
+			return errors.New("template can not be created from a fully physically backed UVM")
 		}
 	}
 	return nil
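The renames above (Id to ID, DEFAULT_CLONE_NETWORK_NAMESPACE_ID to DefaultCloneNetworkNamespaceID) are what stylecheck's ST1003 asks for: MixedCaps instead of underscores, and initialisms cased consistently. A compact before/after using hypothetical declarations:

package stylecheckdemo

// ST1003 flags both SCREAMING_SNAKE_CASE and half-cased initialisms:
//
//	const DEFAULT_TIMEOUT_MS = 500  // want MixedCaps, not underscores
//	type server struct{ Id string } // want ID, not Id
//
// The Go-idiomatic equivalents:

const defaultTimeoutMS = 500 // unexported MixedCaps; "MS" stays upper as an initialism

type server struct {
	ID string // initialisms are uniformly upper: ID, URL, API, HTTP, ...
}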
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go
index 8ddd3c3d2e..b00b0feee3 100644
--- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go
+++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go
@@ -316,7 +316,7 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error
 		if uvm.namespaces == nil {
 			uvm.namespaces = make(map[string]*namespaceInfo)
 		}
-		uvm.namespaces[DEFAULT_CLONE_NETWORK_NAMESPACE_ID] = &namespaceInfo{
+		uvm.namespaces[DefaultCloneNetworkNamespaceID] = &namespaceInfo{
 			nics: make(map[string]*nicInfo),
 		}
 		uvm.IsClone = true
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go
index fcec86de66..9e0a56dccc 100644
--- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go
+++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go
@@ -43,7 +43,7 @@ var (
 func (uvm *UtilityVM) SetupNetworkNamespace(ctx context.Context, nsid string) error {
 	nsidInsideUVM := nsid
 	if uvm.IsTemplate || uvm.IsClone {
-		nsidInsideUVM = DEFAULT_CLONE_NETWORK_NAMESPACE_ID
+		nsidInsideUVM = DefaultCloneNetworkNamespaceID
 	}

 	// Query endpoints with actual nsid
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go
index 2e0e72934b..1b4e905f00 100644
--- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go
+++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go
@@ -507,7 +507,7 @@ func (sm *SCSIMount) GobDecode(data []byte) error {
 		return fmt.Errorf(errMsgFmt, err)
 	}
 	if sm.serialVersionID != scsiCurrentSerialVersionID {
-		return fmt.Errorf("Serialized version of SCSIMount: %d doesn't match with the current version: %d", sm.serialVersionID, scsiCurrentSerialVersionID)
+		return fmt.Errorf("serialized version of SCSIMount: %d doesn't match with the current version: %d", sm.serialVersionID, scsiCurrentSerialVersionID)
 	}
 	if err := decoder.Decode(&sm.HostPath); err != nil {
 		return fmt.Errorf(errMsgFmt, err)
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go
index 27b20f3675..11d5a89b37 100644
--- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go
+++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go
@@ -362,7 +362,7 @@ func (vsmb *VSMBShare) GobDecode(data []byte) error {
 		return fmt.Errorf(errMsgFmt, err)
 	}
 	if vsmb.serialVersionID != vsmbCurrentSerialVersionID {
-		return fmt.Errorf("Serialized version of VSMBShare %d doesn't match with the current version %d", vsmb.serialVersionID, vsmbCurrentSerialVersionID)
+		return fmt.Errorf("serialized version of VSMBShare %d doesn't match with the current version %d", vsmb.serialVersionID, vsmbCurrentSerialVersionID)
 	}
 	if err := decoder.Decode(&vsmb.HostPath); err != nil {
 		return fmt.Errorf(errMsgFmt, err)
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
index 83ba72cfad..b7f3064f26 100644
--- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
+++ b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
@@ -76,7 +76,7 @@ func readTombstones(path string) (map[string]([]string), error) {
 	defer tf.Close()
 	s := bufio.NewScanner(tf)
 	if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" {
-		return nil, errors.New("Invalid tombstones file")
+		return nil, errors.New("invalid tombstones file")
 	}

 	ts := make(map[string]([]string))
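The GobDecode changes in scsi.go and vsmb.go above sit inside a guard that decodes and checks a serial version before any other field, so stale serialized state fails fast instead of decoding garbage. A minimal sketch of that pattern with hypothetical names (the real types keep the version field unexported):

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

const currentSerialVersionID uint32 = 2

// mount demonstrates the version-guard pattern: the version is encoded
// first and checked first on decode.
type mount struct {
	SerialVersionID uint32
	HostPath        string
}

func (m *mount) GobEncode() ([]byte, error) {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	if err := enc.Encode(currentSerialVersionID); err != nil {
		return nil, err
	}
	if err := enc.Encode(m.HostPath); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func (m *mount) GobDecode(data []byte) error {
	dec := gob.NewDecoder(bytes.NewReader(data))
	if err := dec.Decode(&m.SerialVersionID); err != nil {
		return err
	}
	if m.SerialVersionID != currentSerialVersionID {
		return fmt.Errorf("serialized version %d doesn't match current version %d",
			m.SerialVersionID, currentSerialVersionID)
	}
	return dec.Decode(&m.HostPath)
}

func main() {
	in := &mount{HostPath: `C:\scratch.vhdx`}
	data, _ := in.GobEncode()
	out := &mount{}
	fmt.Println(out.GobDecode(data), out.HostPath) // <nil> C:\scratch.vhdx
}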
diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/securitypolicy/securitypolicyenforcer.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/securitypolicy/securitypolicyenforcer.go
index 53484afd29..8d8bd7290b 100644
--- a/test/vendor/github.com/Microsoft/hcsshim/pkg/securitypolicy/securitypolicyenforcer.go
+++ b/test/vendor/github.com/Microsoft/hcsshim/pkg/securitypolicy/securitypolicyenforcer.go
@@ -105,7 +105,7 @@ func (policyState *StandardSecurityPolicyEnforcer) EnforcePmemMountPolicy(target
 	}

 	if deviceHash == "" {
-		return errors.New("device is missing verity root hash.")
+		return errors.New("device is missing verity root hash")
 	}

 	found := false
@@ -136,8 +136,8 @@ func (policyState *StandardSecurityPolicyEnforcer) EnforceOverlayMountPolicy(con
 	// find maximum number of containers that could share this overlay
 	maxPossibleContainerIdsForOverlay := 0
-	for _, device_list := range policyState.Devices {
-		if equalForOverlay(layerPaths, device_list) {
+	for _, deviceList := range policyState.Devices {
+		if equalForOverlay(layerPaths, deviceList) {
 			maxPossibleContainerIdsForOverlay++
 		}
 	}
@@ -147,8 +147,8 @@ func (policyState *StandardSecurityPolicyEnforcer) EnforceOverlayMountPolicy(con
 		return errors.New(errmsg)
 	}

-	for i, device_list := range policyState.Devices {
-		if equalForOverlay(layerPaths, device_list) {
+	for i, deviceList := range policyState.Devices {
+		if equalForOverlay(layerPaths, deviceList) {
 			existing := policyState.ContainerIndexToContainerIds[i]
 			if len(existing) < maxPossibleContainerIdsForOverlay {
 				policyState.ContainerIndexToContainerIds[i] = append(existing, containerID)
@@ -167,9 +167,9 @@ func equalForOverlay(a1 []string, a2 []string) bool {
 	// top to bottom (the order a string gets concatenated for the unix mount
 	// command). We do our check with that in mind.
 	if len(a1) == len(a2) {
-		top_index := len(a2) - 1
+		topIndex := len(a2) - 1
 		for i, v := range a1 {
-			if v != a2[top_index-i] {
+			if v != a2[topIndex-i] {
 				return false
 			}
 		}
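equalForOverlay compares the requested layer paths against the policy's stored device list back-to-front, since overlay lowerdirs are listed top-down while the policy stores layers bottom-up. The same comparison in isolation, with made-up data:

package main

import "fmt"

// equalReversed reports whether a1 equals a2 read back-to-front - the
// same trick equalForOverlay uses to match overlay lowerdir ordering
// (top-to-bottom) against the policy's bottom-to-top device list.
func equalReversed(a1, a2 []string) bool {
	if len(a1) != len(a2) {
		return false
	}
	top := len(a2) - 1
	for i, v := range a1 {
		if v != a2[top-i] {
			return false
		}
	}
	return true
}

func main() {
	overlayOrder := []string{"layer2", "layer1", "base"} // top to bottom
	policyOrder := []string{"base", "layer1", "layer2"}  // bottom to top
	fmt.Println(equalReversed(overlayOrder, policyOrder))  // true
	fmt.Println(equalReversed(overlayOrder, overlayOrder)) // false
}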