diff --git a/internal/environment/nomad_environment.go b/internal/environment/nomad_environment.go index 4a859936..5f942d94 100644 --- a/internal/environment/nomad_environment.go +++ b/internal/environment/nomad_environment.go @@ -95,6 +95,10 @@ func (n *NomadEnvironment) PrewarmingPoolSize() uint { count, err := strconv.Atoi(configTaskGroup.Meta[nomad.ConfigMetaPoolSizeKey]) if err != nil { log.WithError(err).Error("Prewarming pool size can not be parsed from Job") + return 0 + } else if count < 0 { + log.WithError(util.ErrOverflow).WithField("size", count).Warning("Not a valid Prewarming pool size") + return 0 } return uint(count) } @@ -112,18 +116,23 @@ func (n *NomadEnvironment) SetPrewarmingPoolSize(count uint) { func (n *NomadEnvironment) CPULimit() uint { defaultTaskGroup := nomad.FindAndValidateDefaultTaskGroup(n.job) defaultTask := nomad.FindAndValidateDefaultTask(defaultTaskGroup) - return uint(*defaultTask.Resources.CPU) + cpuLimit := *defaultTask.Resources.CPU + if cpuLimit < 0 { + log.WithError(util.ErrOverflow).WithField("limit", cpuLimit).Warning("not a valid CPU limit") + return 0 + } + return uint(cpuLimit) } func (n *NomadEnvironment) SetCPULimit(limit uint) error { if limit > math.MaxInt32 { - return fmt.Errorf("limit too high: %w", util.ErrMaxNumberExceeded) + return fmt.Errorf("limit too high: %w", util.ErrOverflow) } defaultTaskGroup := nomad.FindAndValidateDefaultTaskGroup(n.job) defaultTask := nomad.FindAndValidateDefaultTask(defaultTaskGroup) - integerCPULimit := int(limit) //nolint:gosec // We check for an integer overflow right above. 
+ integerCPULimit := int(limit) defaultTask.Resources.CPU = &integerCPULimit return nil } @@ -131,22 +140,26 @@ func (n *NomadEnvironment) SetCPULimit(limit uint) error { func (n *NomadEnvironment) MemoryLimit() uint { defaultTaskGroup := nomad.FindAndValidateDefaultTaskGroup(n.job) defaultTask := nomad.FindAndValidateDefaultTask(defaultTaskGroup) - maxMemoryLimit := defaultTask.Resources.MemoryMaxMB - if maxMemoryLimit != nil { - return uint(*maxMemoryLimit) + if defaultTask.Resources.MemoryMaxMB == nil { + return 0 + } + maxMemoryLimit := *defaultTask.Resources.MemoryMaxMB + if maxMemoryLimit < 0 { + log.WithError(util.ErrOverflow).WithField("limit", maxMemoryLimit).Warning("not a valid memory limit") + return 0 } - return 0 + return uint(maxMemoryLimit) } func (n *NomadEnvironment) SetMemoryLimit(limit uint) error { if limit > math.MaxInt32 { - return fmt.Errorf("limit too high: %w", util.ErrMaxNumberExceeded) + return fmt.Errorf("limit too high: %w", util.ErrOverflow) } defaultTaskGroup := nomad.FindAndValidateDefaultTaskGroup(n.job) defaultTask := nomad.FindAndValidateDefaultTask(defaultTaskGroup) - integerMemoryMaxLimit := int(limit) //nolint:gosec // We check for an integer overflow right above. 
+ integerMemoryMaxLimit := int(limit) defaultTask.Resources.MemoryMaxMB = &integerMemoryMaxLimit return nil } diff --git a/internal/nomad/command_execution.go b/internal/nomad/command_execution.go index da36126f..e96c1c42 100644 --- a/internal/nomad/command_execution.go +++ b/internal/nomad/command_execution.go @@ -169,10 +169,10 @@ func injectStartDebugMessage(command string, start uint, end int) string { if start < uint(len(commandFields)) { commandFields = commandFields[start:] - if start > uint(math.MaxInt32)-uint(end) { + if (end < 0 && start > uint(math.MaxInt32)) || (end > 0 && start > uint(math.MaxInt32)-uint(end)) { log.WithField("start", start).Error("passed start too big") } - end -= int(start) //nolint:gosec // We check for an integer overflow right above. + end -= int(start) } if end >= 0 && end < len(commandFields) { commandFields = commandFields[:end] diff --git a/internal/runner/aws_runner_test.go b/internal/runner/aws_runner_test.go index 23b83c5b..54844a35 100644 --- a/internal/runner/aws_runner_test.go +++ b/internal/runner/aws_runner_test.go @@ -75,7 +75,7 @@ func (s *MainTestSuite) TestAWSFunctionWorkload_ExecuteInteractively() { s.Run("establishes WebSocket connection to AWS endpoint", func() { // Convert http://127.0.0.1 to ws://127.0.0.1 config.Config.AWS.Endpoint = "ws" + strings.TrimPrefix(sv.URL, "http") - awsMock.ctx, cancel = context.WithCancel(context.Background()) + awsMock.ctx, cancel = context.WithCancel(context.Background()) //nolint:fatcontext // We are resetting the context not making it bigger. 
cancel() runnerWorkload.StoreExecution(tests.DefaultEnvironmentIDAsString, &dto.ExecutionRequest{}) @@ -88,7 +88,7 @@ func (s *MainTestSuite) TestAWSFunctionWorkload_ExecuteInteractively() { s.Run("sends execution request", func() { s.T().Skip("The AWS runner ignores its context for executions and waits infinitely for the exit message.") - awsMock.ctx, cancel = context.WithTimeout(context.Background(), tests.ShortTimeout) + awsMock.ctx, cancel = context.WithTimeout(context.Background(), tests.ShortTimeout) //nolint:fatcontext // We are not making the context bigger. defer cancel() command := "sl" request := &dto.ExecutionRequest{Command: command} diff --git a/internal/runner/nomad_manager.go b/internal/runner/nomad_manager.go index daea0292..c0368f77 100644 --- a/internal/runner/nomad_manager.go +++ b/internal/runner/nomad_manager.go @@ -155,7 +155,7 @@ func (m *NomadRunnerManager) checkPrewarmingPoolAlert(ctx context.Context, envir if reloadTimeout > uint(math.MaxInt64)/uint(time.Second) { log.WithField("timeout", reloadTimeout).Error("configured reload timeout too big") } - reloadTimeoutDuration := time.Duration(reloadTimeout) * time.Second //nolint:gosec // We check for an integer overflow right above. 
+ reloadTimeoutDuration := time.Duration(reloadTimeout) * time.Second if reloadTimeout == 0 || float64(environment.IdleRunnerCount())/float64(environment.PrewarmingPoolSize()) >= prewarmingPoolThreshold { return diff --git a/internal/runner/nomad_manager_test.go b/internal/runner/nomad_manager_test.go index 1de44f4b..548ba6fe 100644 --- a/internal/runner/nomad_manager_test.go +++ b/internal/runner/nomad_manager_test.go @@ -693,7 +693,7 @@ func (s *MainTestSuite) TestNomadRunnerManager_Load() { } func (s *MainTestSuite) TestNomadRunnerManager_checkPrewarmingPoolAlert() { - timeout := 1 + const timeout = 1 config.Config.Server.Alert.PrewarmingPoolReloadTimeout = uint(timeout) config.Config.Server.Alert.PrewarmingPoolThreshold = 0.5 environment := &ExecutionEnvironmentMock{} diff --git a/internal/runner/nomad_runner.go b/internal/runner/nomad_runner.go index b6060e2e..e70d6cd2 100644 --- a/internal/runner/nomad_runner.go +++ b/internal/runner/nomad_runner.go @@ -110,8 +110,11 @@ func (r *NomadJob) Environment() dto.EnvironmentID { func (r *NomadJob) MappedPorts() []*dto.MappedPort { ports := make([]*dto.MappedPort, 0, len(r.portMappings)) for _, portMapping := range r.portMappings { + if portMapping.To < 0 { + log.WithError(util.ErrOverflow).WithField("mapping", portMapping.To).Warn("not a valid port") + } ports = append(ports, &dto.MappedPort{ - ExposedPort: uint(portMapping.To), + ExposedPort: uint(portMapping.To), //nolint:gosec // We check for an overflow right above. 
HostAddress: fmt.Sprintf("%s:%d", portMapping.HostIP, portMapping.Value), }) } @@ -132,7 +135,7 @@ func (r *NomadJob) UpdateMappedPorts(ports []*dto.MappedPort) error { return fmt.Errorf("failed parsing the port: %w", err) } if portMapping.ExposedPort > math.MaxInt32 { - return util.ErrMaxNumberExceeded + return util.ErrOverflow } mapping = append(mapping, nomadApi.PortMapping{ diff --git a/pkg/monitoring/influxdb2_middleware.go b/pkg/monitoring/influxdb2_middleware.go index 1f7b143c..f3cb11ad 100644 --- a/pkg/monitoring/influxdb2_middleware.go +++ b/pkg/monitoring/influxdb2_middleware.go @@ -71,9 +71,9 @@ func InitializeInfluxDB(influxConfiguration *config.InfluxDB) (cancel func()) { // Set options for retrying with the influx client. options := influxdb2.DefaultOptions() - options.SetRetryInterval(uint(retryInterval.Milliseconds())) + options.SetRetryInterval(uint(retryInterval.Milliseconds())) //nolint:gosec // The constant 5_000 does not overflow uint. options.SetMaxRetries(maxRetries) - options.SetMaxRetryTime(uint(retryExpire.Milliseconds())) + options.SetMaxRetryTime(uint(retryExpire.Milliseconds())) //nolint:gosec // The constant 600_000 does not overflow uint. options.SetRetryBufferLimit(retryBufferLimit) // Create a new influx client. diff --git a/pkg/util/util.go b/pkg/util/util.go index 1fd2aa89..dd690081 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -15,7 +15,7 @@ var ( // InitialWaitingDuration is the default initial duration of waiting after a failed time. InitialWaitingDuration = time.Second ErrRetryContextDone = errors.New("the retry context is done") - ErrMaxNumberExceeded = errors.New("the passed number is too big") + ErrOverflow = errors.New("the passed number is too small or too big") ) func retryExponential(ctx context.Context, sleep time.Duration, f func() error) func() error {