Enable unit tests in PR and merge builds #321

Merged · 4 commits · Aug 19, 2024

Changes from all commits:

.github/workflows/actions.yaml (17 changes: 15 additions & 2 deletions)

@@ -10,11 +10,24 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout the code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
       - name: Run the formatter, linter, and vetter
         uses: dell/common-github-actions/go-code-formatter-linter-vetter@main
         with:
           directories: ./...
+  test:
+    name: Run Go unit tests and check package coverage
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout csi-powermax
+        uses: actions/checkout@v4
+      - name: Run unit tests and check package coverage
+        uses: dell/common-github-actions/go-code-tester@main
+        with:
+          threshold: 1
+          test-folder: "./service"
+          race-detector: "true"
+          skip-test: "TestGoDog"
   go_security_scan:
     name: Run gosec
     runs-on: ubuntu-latest
@@ -31,7 +44,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout the code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
       - name: Run malware scan
         uses: dell/common-github-actions/malware-scanner@main
         with:
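
The new `test` job is the core of this PR: every PR and merge build now runs the unit tests via dell/common-github-actions/go-code-tester, with a (for now minimal) coverage threshold of 1, the race detector enabled, and the BDD entry point `TestGoDog` skipped. As a hedged illustration of what `race-detector: "true"` adds, assuming the action forwards it as `go test -race`, here is a hypothetical test containing the kind of unsynchronized access the detector reports:

```go
package service_test

import (
	"sync"
	"testing"
)

// TestUnsyncedCounter is a hypothetical example, not part of csi-powermax.
// Two goroutines increment the same variable with no synchronization;
// plain `go test` usually passes, but `go test -race` reports a data race.
func TestUnsyncedCounter(t *testing.T) {
	counter := 0
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			counter++ // unsynchronized read-modify-write
		}()
	}
	wg.Wait()
	t.Logf("counter = %d", counter)
}
```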

Makefile (10 changes: 7 additions & 3 deletions)

@@ -1,4 +1,4 @@
-# Copyright © 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright © 2020-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -59,9 +59,13 @@ docker:
 push: docker
 	make -f docker.mk push
 
-# Windows or Linux; requires no hardware
+# Run unit tests and skip the BDD tests
 unit-test: golint check
-	( cd service; go clean -cache; CGO_ENABLED=0 GO111MODULE=on go test -v -coverprofile=c.out ./... )
+	( cd service; go clean -cache; CGO_ENABLED=0 GO111MODULE=on go test -skip TestGoDog -v -coverprofile=c.out ./... )
+
+# Run BDD tests. Need to be root to run as tests require some system access, need to fix
+bdd-test: golint check
+	( cd service; go clean -cache; CGO_ENABLED=0 GO111MODULE=on go test -run TestGoDog -v -coverprofile=c.out ./... )
 
 # Linux only; populate env.sh with the hardware parameters
 integration-test:
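
The unit-test/bdd-test split leans on `go test`'s name-matching flags: `-run TestGoDog` selects only tests whose names match that pattern, while `-skip` (available since Go 1.21) excludes them, letting both suites live in the same service package. A minimal sketch of that convention; both functions are illustrative stand-ins, not the real tests in service/:

```go
package service

import "testing"

// TestGoDog mirrors the single entry point the Makefile targets match on:
// `make bdd-test` runs only this (go test -run TestGoDog), while
// `make unit-test` excludes it (go test -skip TestGoDog).
func TestGoDog(t *testing.T) {
	t.Log("godog BDD scenarios would be driven from here")
}

// TestSomethingSmall stands in for an ordinary unit test; it is picked up
// by `make unit-test` and ignored by `make bdd-test`.
func TestSomethingSmall(t *testing.T) {
	if got := 1 + 1; got != 2 {
		t.Fatalf("expected 2, got %d", got)
	}
}
```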

service/controller.go (16 changes: 8 additions & 8 deletions)

@@ -320,7 +320,7 @@ func (s *service) CreateVolume(
 	err = s.validateStoragePoolID(ctx, symmetrixID, storagePoolID, pmaxClient)
 	if err != nil {
 		log.Error(err.Error())
-		return nil, status.Errorf(codes.InvalidArgument, err.Error())
+		return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
 	}
 
 	// SLO is optional
@@ -662,8 +662,8 @@ func (s *service) CreateVolume(
 		return nil, status.Errorf(codes.Internal, "Error fetching volume for idempotence check: %s", err.Error())
 	}
 	if len(vol.StorageGroupIDList) < 1 {
-		log.Error("Idempotence check: StorageGroupIDList is empty for (%s): " + volumeID)
-		return nil, status.Errorf(codes.Internal, "Idempotence check: StorageGroupIDList is empty for (%s): "+volumeID)
+		log.Error("Idempotence check: StorageGroupIDList is empty for volume ", volumeID)
+		return nil, status.Errorf(codes.Internal, "Idempotence check: StorageGroupIDList is empty for (%s): ", volumeID)
 	}
 	matchesStorageGroup := false
 	for _, sgid := range vol.StorageGroupIDList {
@@ -1105,8 +1105,8 @@ func (s *service) createMetroVolume(ctx context.Context, req *csi.CreateVolumeRe
 		return nil, status.Errorf(codes.Internal, "Error fetching volume for idempotence check: %s", err.Error())
 	}
 	if len(vol.StorageGroupIDList) < 1 {
-		log.Error("Idempotence check: StorageGroupIDList is empty for (%s): " + volumeID)
-		return nil, status.Errorf(codes.Internal, "Idempotence check: StorageGroupIDList is empty for (%s): "+volumeID)
+		log.Error("Idempotence check: StorageGroupIDList is empty for volume ", volumeID)
+		return nil, status.Errorf(codes.Internal, "Idempotence check: StorageGroupIDList is empty for (%s): ", volumeID)
 	}
 	matchesStorageGroup := false
 	for _, sgid := range vol.StorageGroupIDList {
@@ -2844,7 +2844,7 @@ func (s *service) GetCapacity(
 	err = s.validateStoragePoolID(ctx, symmetrixID, storagePoolID, pmaxClient)
 	if err != nil {
 		log.Error(err.Error())
-		return nil, status.Errorf(codes.InvalidArgument, err.Error())
+		return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
 	}
 
 	// log all parameters used in GetCapacity call
@@ -3738,7 +3738,7 @@ func (s *service) CreateRemoteVolume(ctx context.Context, req *csiext.CreateRemo
 	err = s.validateStoragePoolID(ctx, remoteSymID, remoteSRPID, pmaxClient)
 	if err != nil {
 		log.Error(err.Error())
-		return nil, status.Errorf(codes.InvalidArgument, err.Error())
+		return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
 	}
 
 	// Validate Remote SLO
@@ -4258,7 +4258,7 @@ func (s *service) getStorageProtectionGroupStatus(ctx context.Context, protectio
 	log.WithFields(fields).Info("Executing GetStorageProtectionGroupStatus with following fields")
 	_, rDFGno, repMode, err := GetRDFInfoFromSGID(protectionGroupID)
 	if err != nil {
-		return nil, status.Errorf(codes.InvalidArgument, err.Error())
+		return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
 	}
 	if repMode == Async || repMode == Sync {
 		psg, err := pmaxClient.GetStorageGroupRDFInfo(ctx, symID, protectionGroupID, rDFGno)
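
The recurring edit in this file (and in the ones that follow) swaps `status.Errorf(code, err.Error())` for `status.Errorf(code, "%s", err.Error())`. It fixes a classic printf pitfall: dynamic text used as a format string has any `%` it contains parsed as a verb, and modern `go vet` printf checks flag such non-constant format strings. A standalone sketch of the failure mode, using a hypothetical error message rather than one from the driver:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// Hypothetical error whose text happens to contain a '%'.
	err := errors.New("storage pool at 90% of capacity")

	// Unsafe: the message becomes the format string, so "% o" is parsed
	// as a space-flagged %o verb with no operand.
	fmt.Println(fmt.Errorf(err.Error()))
	// Output: storage pool at 90%!o(MISSING)f capacity

	// Safe: an explicit "%s" format treats the message as plain data.
	fmt.Println(fmt.Errorf("%s", err.Error()))
	// Output: storage pool at 90% of capacity
}
```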

service/csi_extension_server.go (2 changes: 1 addition & 1 deletion)

@@ -121,7 +121,7 @@ func (s *service) checkIfNodeIsConnected(ctx context.Context, symID string, node
 		message = fmt.Sprintf("connectivity unknown for array %s to node %s due to %s", symID, nodeID, err)
 		log.Error(message)
 		rep.Messages = append(rep.Messages, message)
-		log.Errorf(err.Error())
+		log.Errorf("%s", err.Error())
 	}
 
 	if connected {

service/deletion_worker.go (4 changes: 2 additions & 2 deletions)

@@ -184,7 +184,7 @@ func (queue *deletionQueue) QueueDeviceForDeletion(device csiDevice) error {
 		if dev.equals(device) {
 			msg := fmt.Sprintf("%s: found existing entry in deletion queue with volume handle: %s, added at: %v",
 				dev.print(), dev.VolumeIdentifier, dev.Status.AdditionTime)
-			return fmt.Errorf(msg)
+			return fmt.Errorf("%s", msg)
 		}
 	}
 	queue.DeviceList = append(queue.DeviceList, &device)
@@ -799,7 +799,7 @@ func (worker *deletionWorker) populateDeletionQueue() {
 	for _, symID := range worker.SymmetrixIDs {
 		log.Infof("Processing symmetrix %s for volumes to be deleted with cluster prefix: %s", symID, worker.ClusterPrefix)
 		volDeletePrefix := DeletionPrefix + CSIPrefix + "-" + worker.ClusterPrefix
-		log.Infof("Deletion Prefix: " + volDeletePrefix)
+		log.Infof("Deletion Prefix: %s", volDeletePrefix)
 		pmaxClient, err := symmetrix.GetPowerMaxClient(symID)
 		if err != nil {
 			log.Error(err.Error())

service/migration.go (7 changes: 4 additions & 3 deletions)

@@ -84,7 +84,7 @@ func (s *service) VolumeMigrate(ctx context.Context, req *csimgr.VolumeMigrateRe
 	err = s.validateStoragePoolID(ctx, symID, storagePoolID, pmaxClient)
 	if err != nil {
 		log.Error(err.Error())
-		return nil, status.Errorf(codes.InvalidArgument, err.Error())
+		return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
 	}
 
 	// SLO is optional
@@ -296,8 +296,9 @@ func replToNonRepl(ctx context.Context, params map[string]string, sourceScParams
 		localRDFGrpNo = strconv.Itoa(vol.RDFGroupIDList[0].RDFGroupNumber)
 		rdfInfo, err := pmaxClient.GetRDFGroupByID(ctx, symID, localRDFGrpNo)
 		if err != nil {
-			log.Error(fmt.Sprintf("Could not get remote rdfG for %s: %s:", localRDFGrpNo, err.Error()))
-			return status.Errorf(codes.Internal, fmt.Sprintf("Could not get remote rdfG for %s: %s:", localRDFGrpNo, err.Error()))
+			msg := fmt.Sprintf("Could not get local rdfG for %s: %s:", localRDFGrpNo, err.Error())
+			log.Error(msg)
+			return status.Errorf(codes.Internal, "%s", msg)
 		}
 		remoteRDFGrpNo = strconv.Itoa(rdfInfo.RemoteRdfgNumber)
 	}

service/node.go (28 changes: 14 additions & 14 deletions)

@@ -1685,7 +1685,7 @@ func (s *service) verifyAndUpdateInitiatorsInADiffHost(ctx context.Context, symI
 		}
 	}
 	if 0 < len(errormsg) {
-		return validInitiators, fmt.Errorf(errormsg)
+		return validInitiators, fmt.Errorf("%s", errormsg)
 	}
 
 	if len(validInitiators) == 0 && strings.Contains(hostID, NvmeTCPTransportProtocol) {
@@ -1915,7 +1915,7 @@ func (s *service) updateNQNWithHostID(ctx context.Context, symID string, NQNs []
 		} else {
 			hostID := initiator.HostID
 			nqn = nqn + ":" + hostID
-			log.Infof("updated host nqn is:" + nqn)
+			log.Infof("updated host nqn is: %s", nqn)
 		}
 		updatesHostNQNs = append(updatesHostNQNs, nqn)
 	}
@@ -1940,7 +1940,7 @@ func (s *service) getAndConfigureMaskingViewTargets(ctx context.Context, array,
 	// this will also update the cache
 	targets, err := s.getIscsiTargetsForMaskingView(ctx, array, view, pmaxClient)
 	if err != nil {
-		log.Debugf(err.Error())
+		log.Debugf("%s", err.Error())
 		return goISCSITargets, err
 	}
 	for _, tgt := range targets {
@@ -1976,7 +1976,7 @@ func (s *service) getAndConfigureMaskingViewTargetsNVMeTCP(ctx context.Context,
 	// this will also update the cache
 	targets, err := s.getNVMeTCPTargetsForMaskingView(ctx, array, view, pmaxClient)
 	if err != nil {
-		log.Debugf(err.Error())
+		log.Debugf("%s", err.Error())
 		return goNVMeTargets, err
 	}
 	for _, tgt := range targets {
@@ -2108,8 +2108,8 @@ func (s *service) setCHAPCredentials(array string, targets []maskingViewTargetIn
 	if len(IQNs) > 0 {
 		if s.opts.CHAPUserName != "" {
 			errorMsg := "multiple IQNs found on host and CHAP username is not set. invalid configuration"
-			log.Debugf(errorMsg)
-			return fmt.Errorf(errorMsg)
+			log.Debugf("%s", errorMsg)
+			return fmt.Errorf("%s", errorMsg)
 		}
 	}
 	chapUserName := s.opts.CHAPUserName
@@ -2166,7 +2166,7 @@ func (s *service) ensureISCSIDaemonStarted() error {
 		// Failed to get the status of ISCSI Daemon
 		errMsg := fmt.Sprintf("failed to find %s. Going to panic", target)
 		log.Error(errMsg)
-		panic(fmt.Errorf(errMsg))
+		panic(fmt.Errorf("%s", errMsg))
 	} else if unit.ActiveState != "active" {
 		log.Infof("%s is not active. Current state - %s", target, unit.ActiveState)
 	} else {
@@ -2190,9 +2190,9 @@
 	job := <-responsechan
 	if job != "done" {
 		// Job didn't succeed
-		errMsg := fmt.Sprintf("Failed to get a successful response from the job to start ISCSI daemon")
+		errMsg := "Failed to get a successful response from the job to start ISCSI daemon"
 		log.Error(errMsg)
-		return fmt.Errorf(errMsg)
+		return fmt.Errorf("%s", errMsg)
 	}
 	log.Info("Successfully started ISCSI daemon")
 	return nil
@@ -2432,7 +2432,7 @@ func (s *service) createOrUpdateFCHost(ctx context.Context, array string, nodeNa
 	log.Debug(fmt.Sprintf("GetHostById returned: %v, %v", host, err))
 	if err != nil {
 		// host does not exist, create it
-		log.Infof(fmt.Sprintf("Array %s FC Host %s does not exist. Creating it.", array, nodeName))
+		log.Infof("Array %s FC Host %s does not exist. Creating it.", array, nodeName)
 		host, err = s.retryableCreateHost(ctx, array, nodeName, hostInitiators, nil, pmaxClient)
 		if err != nil {
 			return host, err
@@ -2466,11 +2466,11 @@ func (s *service) createOrUpdateIscsiHost(ctx context.Context, array string, nod
 	}
 
 	host, err := pmaxClient.GetHostByID(ctx, array, nodeName)
-	log.Infof(fmt.Sprintf("GetHostById returned: %v, %v", host, err))
+	log.Infof("GetHostById returned: %v, %v", host, err)
 
 	if err != nil {
 		// host does not exist, create it
-		log.Infof(fmt.Sprintf("ISCSI Host %s does not exist. Creating it.", nodeName))
+		log.Infof("ISCSI Host %s does not exist. Creating it.", nodeName)
 		host, err = s.retryableCreateHost(ctx, array, nodeName, IQNs, nil, pmaxClient)
 		if err != nil {
 			return &types.Host{}, fmt.Errorf("Unable to create Host: %v", err)
@@ -2503,11 +2503,11 @@ func (s *service) createOrUpdateNVMeTCPHost(ctx context.Context, array string, n

 	// process the NQNs
 	host, err := pmaxClient.GetHostByID(ctx, array, nodeName)
-	log.Infof(fmt.Sprintf("GetHostById returned: %v, %v", host, err))
+	log.Infof("GetHostById returned: %v, %v", host, err)

 	if err != nil {
 		// host does not exist, create it
-		log.Infof(fmt.Sprintf("NVMe Host %s does not exist. Creating it.", nodeName))
+		log.Infof("NVMe Host %s does not exist. Creating it.", nodeName)
 		host, err = s.retryableCreateHost(ctx, array, nodeName, NQNs, nil, pmaxClient)
 		if err != nil {
 			return &types.Host{}, fmt.Errorf("Unable to create Host: %v", err)
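
The node.go cleanups also remove a layer of double formatting: `log.Infof(fmt.Sprintf(...))` formats the message once and then re-parses the result as a format string, whereas `Infof` is already printf-style. A minimal standalone sketch, assuming the sirupsen/logrus logger this package appears to alias as `log`; the values are hypothetical:

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	array, nodeName := "000000000001", "worker-0" // hypothetical values

	// Before: formatted twice; Sprintf's output is re-parsed by Infof,
	// so a '%' inside array or nodeName would be misread as a verb
	// (and go vet flags the non-constant format string).
	log.Infof(fmt.Sprintf("Array %s FC Host %s does not exist. Creating it.", array, nodeName))

	// After: format exactly once, with the arguments kept as data.
	log.Infof("Array %s FC Host %s does not exist. Creating it.", array, nodeName)
}
```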