Skip to content

Commit

Permalink
Fix golangci-lint stylecheck issues
Browse files Browse the repository at this point in the history
This change fixes the issues that the stylecheck linter for golangci-lint
brings up. Most of them are errors starting with a capital letter/ending with
punctuation, but there are two that are for double imports. Also adds -v to the
golangci-lint args, so it's easier to tell what's going on.

In internal/layers/layers.go we were importing the /internal/uvm package twice:
any constants from the package were used under a package alias of uvmpkg, while
any uses of a pointer to a UtilityVM object were passed around as `uvm`. I've changed
the latter to be passed around via vm, as we use this elsewhere to store a UtilityVM
object, and then simply replaced uvmpkg with the package name itself, uvm.

Signed-off-by: Daniel Canter <[email protected]>
  • Loading branch information
dcantah committed Aug 24, 2021
1 parent 64f3019 commit 35af99c
Show file tree
Hide file tree
Showing 15 changed files with 67 additions and 69 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ jobs:
uses: golangci/golangci-lint-action@v2
with:
version: v1.42.0 # Has fixes for stylecheck configuration https://github.com/golangci/golangci-lint/pull/2017/files
args: --timeout=5m
args: --timeout=5m -v

test:
runs-on: 'windows-2019'
Expand Down
5 changes: 2 additions & 3 deletions cmd/containerd-shim-runhcs-v1/task_hcs.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@ import (
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"

"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
runhcsopts "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats"
"github.com/Microsoft/hcsshim/internal/cmd"
Expand Down Expand Up @@ -587,7 +586,7 @@ func (ht *hcsTask) DeleteExec(ctx context.Context, eid string) (int, uint32, tim
return int(status.Pid), status.ExitStatus, status.ExitedAt, nil
}

func (ht *hcsTask) Pids(ctx context.Context) ([]options.ProcessDetails, error) {
func (ht *hcsTask) Pids(ctx context.Context) ([]runhcsopts.ProcessDetails, error) {
// Map all user created exec's to pid/exec-id
pidMap := make(map[int]string)
ht.execs.Range(func(key, value interface{}) bool {
Expand All @@ -606,7 +605,7 @@ func (ht *hcsTask) Pids(ctx context.Context) ([]options.ProcessDetails, error) {
}

// Copy to pid/exec-id pair's
pairs := make([]options.ProcessDetails, len(props.ProcessList))
pairs := make([]runhcsopts.ProcessDetails, len(props.ProcessList))
for i, p := range props.ProcessList {
pairs[i].ImageName = p.ImageName
pairs[i].CreatedAt = p.CreateTimestamp
Expand Down
2 changes: 1 addition & 1 deletion cmd/gcstools/commoncli/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ func SetFlagsForLogging() []*string {
// SetupLogging creates the logger from the command line parameters.
func SetupLogging(args ...*string) error {
if len(args) < 1 {
return fmt.Errorf("Invalid log params")
return fmt.Errorf("invalid log params")
}
level, err := logrus.ParseLevel(*args[1])
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion cmd/runhcs/delete.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ status of "ubuntu01" as "stopped" the following will delete resources held for
kill = true
default:
if !force {
return fmt.Errorf("cannot delete container %s that is not stopped: %s\n", id, s)
return fmt.Errorf("cannot delete container %s that is not stopped: %s", id, s)
}
kill = true
}
Expand Down
4 changes: 2 additions & 2 deletions cmd/runhcs/exec.go
Original file line number Diff line number Diff line change
Expand Up @@ -146,12 +146,12 @@ func getProcessSpec(context *cli.Context, c *container) (*specs.Process, error)

func validateProcessSpec(spec *specs.Process) error {
if spec.Cwd == "" {
return fmt.Errorf("Cwd property must not be empty")
return fmt.Errorf("cwd property must not be empty")
}
// IsAbs doesnt recognize Unix paths on Windows builds so handle that case
// here.
if !filepath.IsAbs(spec.Cwd) && !strings.HasPrefix(spec.Cwd, "/") {
return fmt.Errorf("Cwd must be an absolute path")
return fmt.Errorf("cwd must be an absolute path")
}
if len(spec.Args) == 0 {
return fmt.Errorf("args must not be empty")
Expand Down
2 changes: 1 addition & 1 deletion hcn/hcn.go
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ func defaultQuery() HostComputeQuery {

// PlatformDoesNotSupportError happens when users are attempting to use a newer shim on an older OS
func platformDoesNotSupportError(featureName string) error {
return fmt.Errorf("Platform does not support feature %s", featureName)
return fmt.Errorf("platform does not support feature %s", featureName)
}

// V2ApiSupported returns an error if the HCN version does not support the V2 Apis.
Expand Down
2 changes: 1 addition & 1 deletion hcn/hcnroute.go
Original file line number Diff line number Diff line change
Expand Up @@ -137,7 +137,7 @@ func AddRoute(endpoints []HostComputeEndpoint, destinationPrefix string, nextHop
logrus.Debugf("hcn::HostComputeRoute::AddRoute endpointId=%v, destinationPrefix=%v, nextHop=%v, needEncapsulation=%v", endpoints, destinationPrefix, nextHop, needEncapsulation)

if len(endpoints) <= 0 {
return nil, errors.New("Missing endpoints")
return nil, errors.New("missing endpoints")
}

route := &HostComputeRoute{
Expand Down
8 changes: 4 additions & 4 deletions internal/hcsoci/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ func validateContainerConfig(ctx context.Context, coi *createOptionsInternal) er
}

if coi.HostingSystem != nil && coi.templateID != "" && !coi.HostingSystem.IsClone {
return fmt.Errorf("A container can not be cloned inside a non cloned POD")
return fmt.Errorf("a container can not be cloned inside a non cloned POD")
}

if coi.templateID != "" {
Expand All @@ -152,11 +152,11 @@ func validateContainerConfig(ctx context.Context, coi *createOptionsInternal) er

if coi.HostingSystem != nil && coi.HostingSystem.IsTemplate {
if len(coi.Spec.Windows.Devices) != 0 {
return fmt.Errorf("Mapped Devices are not supported for template containers")
return fmt.Errorf("mapped Devices are not supported for template containers")
}

if _, ok := coi.Spec.Windows.CredentialSpec.(string); ok {
return fmt.Errorf("gMSA specifications are not supported for template containers")
return fmt.Errorf("gmsa specifications are not supported for template containers")
}

if coi.Spec.Windows.Servicing {
Expand All @@ -179,7 +179,7 @@ func initializeCreateOptions(ctx context.Context, createOptions *CreateOptions)
}

if coi.Spec == nil {
return nil, fmt.Errorf("Spec must be supplied")
return nil, fmt.Errorf("spec must be supplied")
}

// Defaults if omitted by caller.
Expand Down
91 changes: 45 additions & 46 deletions internal/layers/layers.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ import (
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/ospath"
"github.com/Microsoft/hcsshim/internal/uvm"
uvmpkg "github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/wclayer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
Expand Down Expand Up @@ -78,10 +77,10 @@ func (layers *ImageLayers) Release(ctx context.Context, all bool) error {
//
// TODO dcantah: Keep better track of the layers that are added, don't simply discard the SCSI, VSMB, etc. resource types gotten inside.

func MountContainerLayers(ctx context.Context, containerId string, layerFolders []string, guestRoot string, volumeMountPath string, uvm *uvmpkg.UtilityVM) (_ string, err error) {
func MountContainerLayers(ctx context.Context, containerId string, layerFolders []string, guestRoot string, volumeMountPath string, vm *uvm.UtilityVM) (_ string, err error) {
log.G(ctx).WithField("layerFolders", layerFolders).Debug("hcsshim::mountContainerLayers")

if uvm == nil {
if vm == nil {
if len(layerFolders) < 2 {
return "", errors.New("need at least two layers - base and scratch")
}
Expand Down Expand Up @@ -159,23 +158,23 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders
}

// V2 UVM
log.G(ctx).WithField("os", uvm.OS()).Debug("hcsshim::mountContainerLayers V2 UVM")
log.G(ctx).WithField("os", vm.OS()).Debug("hcsshim::mountContainerLayers V2 UVM")

var (
layersAdded []string
lcowUvmLayerPaths []string
)
defer func() {
if err != nil {
if uvm.OS() == "windows" {
if vm.OS() == "windows" {
for _, l := range layersAdded {
if err := uvm.RemoveVSMB(ctx, l, true); err != nil {
if err := vm.RemoveVSMB(ctx, l, true); err != nil {
log.G(ctx).WithError(err).Warn("failed to remove wcow layer on cleanup")
}
}
} else {
for _, l := range layersAdded {
if err := removeLCOWLayer(ctx, uvm, l); err != nil {
if err := removeLCOWLayer(ctx, vm, l); err != nil {
log.G(ctx).WithError(err).Warn("failed to remove lcow layer on cleanup")
}
}
Expand All @@ -185,13 +184,13 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders

for _, layerPath := range layerFolders[:len(layerFolders)-1] {
log.G(ctx).WithField("layerPath", layerPath).Debug("mounting layer")
if uvm.OS() == "windows" {
options := uvm.DefaultVSMBOptions(true)
if vm.OS() == "windows" {
options := vm.DefaultVSMBOptions(true)
options.TakeBackupPrivilege = true
if uvm.IsTemplate {
uvm.SetSaveableVSMBOptions(options, options.ReadOnly)
if vm.IsTemplate {
vm.SetSaveableVSMBOptions(options, options.ReadOnly)
}
if _, err := uvm.AddVSMB(ctx, layerPath, options); err != nil {
if _, err := vm.AddVSMB(ctx, layerPath, options); err != nil {
return "", fmt.Errorf("failed to add VSMB layer: %s", err)
}
layersAdded = append(layersAdded, layerPath)
Expand All @@ -200,7 +199,7 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders
layerPath = filepath.Join(layerPath, "layer.vhd")
uvmPath string
)
uvmPath, err = addLCOWLayer(ctx, uvm, layerPath)
uvmPath, err = addLCOWLayer(ctx, vm, layerPath)
if err != nil {
return "", fmt.Errorf("failed to add LCOW layer: %s", err)
}
Expand All @@ -209,23 +208,23 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders
}
}

containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot)
containerScratchPathInUVM := ospath.Join(vm.OS(), guestRoot)
hostPath, err := getScratchVHDPath(layerFolders)
if err != nil {
return "", fmt.Errorf("failed to get scratch VHD path in layer folders: %s", err)
}
log.G(ctx).WithField("hostPath", hostPath).Debug("mounting scratch VHD")

var options []string
scsiMount, err := uvm.AddSCSI(ctx, hostPath, containerScratchPathInUVM, false, options, uvmpkg.VMAccessTypeIndividual)
scsiMount, err := vm.AddSCSI(ctx, hostPath, containerScratchPathInUVM, false, options, uvm.VMAccessTypeIndividual)
if err != nil {
return "", fmt.Errorf("failed to add SCSI scratch VHD: %s", err)
}

// This handles the case where we want to share a scratch disk for multiple containers instead
// of mounting a new one. Pass a unique value for `ScratchPath` to avoid container upper and
// work directories colliding in the UVM.
if scsiMount.RefCount() > 1 && uvm.OS() == "linux" {
if scsiMount.RefCount() > 1 && vm.OS() == "linux" {
scratchFmt := fmt.Sprintf("container_%s", filepath.Base(containerScratchPathInUVM))
containerScratchPathInUVM = ospath.Join("linux", scsiMount.UVMPath, scratchFmt)
} else {
Expand All @@ -234,26 +233,26 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders

defer func() {
if err != nil {
if err := uvm.RemoveSCSI(ctx, hostPath); err != nil {
if err := vm.RemoveSCSI(ctx, hostPath); err != nil {
log.G(ctx).WithError(err).Warn("failed to remove scratch on cleanup")
}
}
}()

var rootfs string
if uvm.OS() == "windows" {
if vm.OS() == "windows" {
// Load the filter at the C:\s<ID> location calculated above. We pass into this request each of the
// read-only layer folders.
var layers []hcsschema.Layer
layers, err = GetHCSLayers(ctx, uvm, layersAdded)
layers, err = GetHCSLayers(ctx, vm, layersAdded)
if err != nil {
return "", err
}
err = uvm.CombineLayersWCOW(ctx, layers, containerScratchPathInUVM)
err = vm.CombineLayersWCOW(ctx, layers, containerScratchPathInUVM)
rootfs = containerScratchPathInUVM
} else {
rootfs = ospath.Join(uvm.OS(), guestRoot, uvmpkg.RootfsPath)
err = uvm.CombineLayersLCOW(ctx, containerId, lcowUvmLayerPaths, containerScratchPathInUVM, rootfs)
rootfs = ospath.Join(vm.OS(), guestRoot, uvm.RootfsPath)
err = vm.CombineLayersLCOW(ctx, containerId, lcowUvmLayerPaths, containerScratchPathInUVM, rootfs)
}
if err != nil {
return "", err
Expand All @@ -262,26 +261,26 @@ func MountContainerLayers(ctx context.Context, containerId string, layerFolders
return rootfs, nil
}

func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) (uvmPath string, err error) {
func addLCOWLayer(ctx context.Context, vm *uvm.UtilityVM, layerPath string) (uvmPath string, err error) {
// don't try to add as vpmem when we want additional devices on the uvm to be fully physically backed
if !uvm.DevicesPhysicallyBacked() {
if !vm.DevicesPhysicallyBacked() {
// We first try vPMEM and if it is full or the file is too large we
// fall back to SCSI.
uvmPath, err = uvm.AddVPMem(ctx, layerPath)
uvmPath, err = vm.AddVPMem(ctx, layerPath)
if err == nil {
log.G(ctx).WithFields(logrus.Fields{
"layerPath": layerPath,
"layerType": "vpmem",
}).Debug("Added LCOW layer")
return uvmPath, nil
} else if err != uvmpkg.ErrNoAvailableLocation && err != uvmpkg.ErrMaxVPMemLayerSize {
} else if err != uvm.ErrNoAvailableLocation && err != uvm.ErrMaxVPMemLayerSize {
return "", fmt.Errorf("failed to add VPMEM layer: %s", err)
}
}

options := []string{"ro"}
uvmPath = fmt.Sprintf(uvmpkg.LCOWGlobalMountPrefix, uvm.UVMMountCounter())
sm, err := uvm.AddSCSI(ctx, layerPath, uvmPath, true, options, uvmpkg.VMAccessTypeNoop)
uvmPath = fmt.Sprintf(uvm.LCOWGlobalMountPrefix, vm.UVMMountCounter())
sm, err := vm.AddSCSI(ctx, layerPath, uvmPath, true, options, uvm.VMAccessTypeNoop)
if err != nil {
return "", fmt.Errorf("failed to add SCSI layer: %s", err)
}
Expand All @@ -292,17 +291,17 @@ func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string)
return sm.UVMPath, nil
}

func removeLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) error {
func removeLCOWLayer(ctx context.Context, vm *uvm.UtilityVM, layerPath string) error {
// Assume it was added to vPMEM and fall back to SCSI
err := uvm.RemoveVPMem(ctx, layerPath)
err := vm.RemoveVPMem(ctx, layerPath)
if err == nil {
log.G(ctx).WithFields(logrus.Fields{
"layerPath": layerPath,
"layerType": "vpmem",
}).Debug("Removed LCOW layer")
return nil
} else if err == uvmpkg.ErrNotAttached {
err = uvm.RemoveSCSI(ctx, layerPath)
} else if err == uvm.ErrNotAttached {
err = vm.RemoveSCSI(ctx, layerPath)
if err == nil {
log.G(ctx).WithFields(logrus.Fields{
"layerPath": layerPath,
Expand Down Expand Up @@ -331,9 +330,9 @@ const (
)

// UnmountContainerLayers is a helper for clients to hide all the complexity of layer unmounting
func UnmountContainerLayers(ctx context.Context, layerFolders []string, containerRootPath, volumeMountPath string, uvm *uvmpkg.UtilityVM, op UnmountOperation) error {
func UnmountContainerLayers(ctx context.Context, layerFolders []string, containerRootPath, volumeMountPath string, vm *uvm.UtilityVM, op UnmountOperation) error {
log.G(ctx).WithField("layerFolders", layerFolders).Debug("hcsshim::unmountContainerLayers")
if uvm == nil {
if vm == nil {
// Must be an argon - folders are mounted on the host
if op != UnmountOperationAll {
return errors.New("only operation supported for host-mounted folders is unmountOperationAll")
Expand Down Expand Up @@ -367,13 +366,13 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe

// Always remove the combined layers as they are part of scsi/vsmb/vpmem
// removals.
if uvm.OS() == "windows" {
if err := uvm.RemoveCombinedLayersWCOW(ctx, containerRootPath); err != nil {
if vm.OS() == "windows" {
if err := vm.RemoveCombinedLayersWCOW(ctx, containerRootPath); err != nil {
log.G(ctx).WithError(err).Warn("failed guest request to remove combined layers")
retError = err
}
} else {
if err := uvm.RemoveCombinedLayersLCOW(ctx, containerRootPath); err != nil {
if err := vm.RemoveCombinedLayersLCOW(ctx, containerRootPath); err != nil {
log.G(ctx).WithError(err).Warn("failed guest request to remove combined layers")
retError = err
}
Expand All @@ -385,7 +384,7 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe
if err != nil {
return errors.Wrap(err, "failed to get scratch VHD path in layer folders")
}
if err := uvm.RemoveSCSI(ctx, hostScratchFile); err != nil {
if err := vm.RemoveSCSI(ctx, hostScratchFile); err != nil {
log.G(ctx).WithError(err).Warn("failed to remove scratch")
if retError == nil {
retError = err
Expand All @@ -398,9 +397,9 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe
// Remove each of the read-only layers from VSMB. These's are ref-counted and
// only removed once the count drops to zero. This allows multiple containers
// to share layers.
if uvm.OS() == "windows" && (op&UnmountOperationVSMB) == UnmountOperationVSMB {
if vm.OS() == "windows" && (op&UnmountOperationVSMB) == UnmountOperationVSMB {
for _, layerPath := range layerFolders[:len(layerFolders)-1] {
if e := uvm.RemoveVSMB(ctx, layerPath, true); e != nil {
if e := vm.RemoveVSMB(ctx, layerPath, true); e != nil {
log.G(ctx).WithError(e).Warn("remove VSMB failed")
if retError == nil {
retError = e
Expand All @@ -414,10 +413,10 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe
// Remove each of the read-only layers from VPMEM (or SCSI). These's are ref-counted
// and only removed once the count drops to zero. This allows multiple containers to
// share layers. Note that SCSI is used on large layers.
if uvm.OS() == "linux" && (op&UnmountOperationVPMEM) == UnmountOperationVPMEM {
if vm.OS() == "linux" && (op&UnmountOperationVPMEM) == UnmountOperationVPMEM {
for _, layerPath := range layerFolders[:len(layerFolders)-1] {
hostPath := filepath.Join(layerPath, "layer.vhd")
if err := removeLCOWLayer(ctx, uvm, hostPath); err != nil {
if err := removeLCOWLayer(ctx, vm, hostPath); err != nil {
log.G(ctx).WithError(err).Warn("remove layer failed")
if retError == nil {
retError = err
Expand Down Expand Up @@ -447,11 +446,11 @@ func GetHCSLayers(ctx context.Context, vm *uvm.UtilityVM, paths []string) (layer
return layers, nil
}

func containerRootfsPath(uvm *uvm.UtilityVM, rootPath string) string {
if uvm.OS() == "windows" {
return ospath.Join(uvm.OS(), rootPath)
func containerRootfsPath(vm *uvm.UtilityVM, rootPath string) string {
if vm.OS() == "windows" {
return ospath.Join(vm.OS(), rootPath)
}
return ospath.Join(uvm.OS(), rootPath, uvmpkg.RootfsPath)
return ospath.Join(vm.OS(), rootPath, uvm.RootfsPath)
}

func getScratchVHDPath(layerFolders []string) (string, error) {
Expand Down
Loading

0 comments on commit 35af99c

Please sign in to comment.