diff --git a/.golangci.toml b/.golangci.toml index 6a285043c..34461c416 100644 --- a/.golangci.toml +++ b/.golangci.toml @@ -46,6 +46,9 @@ "noctx", # Too strict "exhaustive", # Too strict "nlreturn", # Too strict + "wrapcheck", + "exhaustivestruct", + "tparallel", ] [issues] diff --git a/.semaphore/semaphore.yml b/.semaphore/semaphore.yml index 51523ed4c..8c45e3f9e 100644 --- a/.semaphore/semaphore.yml +++ b/.semaphore/semaphore.yml @@ -14,16 +14,21 @@ fail_fast: stop: when: "branch != 'master'" +global_job_config: + secrets: + - name: dockerhub-pull-secrets + prologue: + commands: + - curl -sSfL https://raw.githubusercontent.com/ldez/semgo/master/godownloader.sh | sudo sh -s -- -b "/usr/local/bin" + - sudo semgo go1.15 + - echo "${DOCKERHUB_PASSWORD}" | docker login -u "${DOCKERHUB_USERNAME}" --password-stdin + - checkout + blocks: - name: Build skip: when: "branch = 'gh-pages'" task: - prologue: - commands: - - curl -sSfL https://raw.githubusercontent.com/ldez/semgo/master/godownloader.sh | sudo sh -s -- -b "/usr/local/bin" - - sudo semgo go1.15 - - checkout jobs: - name: Cache Go dependencies commands: @@ -43,9 +48,6 @@ blocks: task: prologue: commands: - - curl -sSfL https://raw.githubusercontent.com/ldez/semgo/master/godownloader.sh | sudo sh -s -- -b "/usr/local/bin" - - sudo semgo go1.15 - - checkout - cache restore jobs: - name: Unit Tests @@ -58,9 +60,6 @@ blocks: task: prologue: commands: - - curl -sSfL https://raw.githubusercontent.com/ldez/semgo/master/godownloader.sh | sudo sh -s -- -b "/usr/local/bin" - - sudo semgo go1.15 - - checkout - cache restore - cache restore traefik-mesh-dist-$SEMAPHORE_GIT_BRANCH-$SEMAPHORE_WORKFLOW_ID - cache restore traefik-mesh-img-$SEMAPHORE_GIT_BRANCH-$SEMAPHORE_WORKFLOW_ID diff --git a/Dockerfile b/Dockerfile index b25874b06..f4d86297f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -21,7 +21,7 @@ WORKDIR /go/src/github.com/traefik/mesh RUN curl -sfL https://install.goreleaser.com/github.com/goreleaser/goreleaser.sh | sh # 
Download golangci-lint binary to bin folder in $GOPATH -RUN curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $GOPATH/bin v1.31.0 +RUN curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $GOPATH/bin v1.32.0 ENV GO111MODULE on COPY go.mod go.sum ./ diff --git a/cmd/mesh/mesh.go b/cmd/mesh/mesh.go index 68a6d1a47..22f99c88b 100644 --- a/cmd/mesh/mesh.go +++ b/cmd/mesh/mesh.go @@ -2,6 +2,7 @@ package main import ( "context" + "errors" "fmt" stdlog "log" "net/http" @@ -119,7 +120,7 @@ func traefikMeshCommand(config *cmd.TraefikMeshConfiguration) error { go func() { defer wg.Done() - if err := apiServer.ListenAndServe(); err != http.ErrServerClosed { + if err := apiServer.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { apiErrCh <- fmt.Errorf("API server has stopped unexpectedly: %w", err) } }() diff --git a/integration/k3d/k3d.go b/integration/k3d/k3d.go index f76192ac8..da11ed021 100644 --- a/integration/k3d/k3d.go +++ b/integration/k3d/k3d.go @@ -90,7 +90,7 @@ func NewCluster(logger logrus.FieldLogger, masterURL string, name string, opts . 
} if err = createCluster(logger, name, clusterOpts.Cmd); err != nil { - return nil, fmt.Errorf("unable to create k3s cluster: %d", err) + return nil, fmt.Errorf("unable to create k3s cluster: %w", err) } if err = importDockerImages(logger, name, clusterOpts.Images); err != nil { @@ -199,7 +199,7 @@ func (c *Cluster) WaitReadyDeployment(name, namespace string, timeout time.Durat return fmt.Errorf("deployment %q has not been yet created", name) } - return fmt.Errorf("unable get deployment %q in namespace %q: %v", name, namespace, err) + return fmt.Errorf("unable get deployment %q in namespace %q: %w", name, namespace, err) } if d.Status.UpdatedReplicas == *(d.Spec.Replicas) && @@ -230,7 +230,7 @@ func (c *Cluster) WaitReadyDaemonSet(name, namespace string, timeout time.Durati return fmt.Errorf("daemonset %q has not been yet created", name) } - return fmt.Errorf("unable get daemonset %q in namespace %q: %v", name, namespace, err) + return fmt.Errorf("unable get daemonset %q in namespace %q: %w", name, namespace, err) } if d.Status.NumberReady == d.Status.DesiredNumberScheduled { return nil @@ -257,7 +257,7 @@ func (c *Cluster) WaitReadyPod(name, namespace string, timeout time.Duration) er return fmt.Errorf("pod %q has not been yet created", name) } - return fmt.Errorf("unable get pod %q in namespace %q: %v", name, namespace, err) + return fmt.Errorf("unable get pod %q in namespace %q: %w", name, namespace, err) } if !isPodReady(pod) { @@ -370,11 +370,11 @@ func createK8sClient(logger logrus.FieldLogger, clusterName, masterURL string) ( err = try.Retry(func() error { client, err = k8s.NewClient(logger, masterURL, kubeConfigPath) if err != nil { - return fmt.Errorf("unable to create clients: %v", err) + return fmt.Errorf("unable to create clients: %w", err) } if _, err = client.KubernetesClient().Discovery().ServerVersion(); err != nil { - return fmt.Errorf("unable to get server version: %v", err) + return fmt.Errorf("unable to get server version: %w", err) } return nil 
diff --git a/integration/try/condition.go b/integration/try/condition.go index f617b59cf..841bdf849 100644 --- a/integration/try/condition.go +++ b/integration/try/condition.go @@ -38,7 +38,7 @@ func BodyContains(values ...string) ResponseCondition { return func(res *http.Response) error { body, err := ioutil.ReadAll(res.Body) if err != nil { - return fmt.Errorf("failed to read response body: %s", err) + return fmt.Errorf("failed to read response body: %w", err) } for _, value := range values { diff --git a/integration/try/try.go b/integration/try/try.go index 19a6efe40..1cf1f3201 100644 --- a/integration/try/try.go +++ b/integration/try/try.go @@ -19,7 +19,7 @@ func Retry(f func() error, timeout time.Duration) error { ebo.MaxElapsedTime = applyCIMultiplier(timeout) if err := backoff.Retry(safe.OperationWithRecover(f), ebo); err != nil { - return fmt.Errorf("unable execute function: %v", err) + return fmt.Errorf("unable execute function: %w", err) } return nil diff --git a/pkg/annotations/annotations_test.go b/pkg/annotations/annotations_test.go index bc90500cf..413a09af7 100644 --- a/pkg/annotations/annotations_test.go +++ b/pkg/annotations/annotations_test.go @@ -1,6 +1,7 @@ package annotations import ( + "errors" "testing" "github.com/stretchr/testify/assert" @@ -154,7 +155,7 @@ func TestGetRetryAttempts(t *testing.T) { attempts, err := GetRetryAttempts(test.annotations) if test.err { require.Error(t, err) - assert.Equal(t, test.wantNotFound, err == ErrNotFound) + assert.Equal(t, test.wantNotFound, errors.Is(err, ErrNotFound)) return } @@ -192,7 +193,7 @@ func TestGetCircuitBreakerExpression(t *testing.T) { value, err := GetCircuitBreakerExpression(test.annotations) if test.err { require.Error(t, err) - assert.Equal(t, test.wantNotFound, err == ErrNotFound) + assert.Equal(t, test.wantNotFound, errors.Is(err, ErrNotFound)) return } @@ -237,7 +238,7 @@ func TestGetRateLimitBurst(t *testing.T) { value, err := GetRateLimitBurst(test.annotations) if test.err { 
require.Error(t, err) - assert.Equal(t, test.wantNotFound, err == ErrNotFound) + assert.Equal(t, test.wantNotFound, errors.Is(err, ErrNotFound)) return } @@ -282,7 +283,7 @@ func TestGetRateLimitAverage(t *testing.T) { value, err := GetRateLimitAverage(test.annotations) if test.err { require.Error(t, err) - assert.Equal(t, test.wantNotFound, err == ErrNotFound) + assert.Equal(t, test.wantNotFound, errors.Is(err, ErrNotFound)) return } diff --git a/pkg/annotations/middleware.go b/pkg/annotations/middleware.go index d5e4b8e33..887260739 100644 --- a/pkg/annotations/middleware.go +++ b/pkg/annotations/middleware.go @@ -38,7 +38,7 @@ func buildRetryMiddleware(annotations map[string]string) (middleware *dynamic.Mi retryAttempts, err = GetRetryAttempts(annotations) if err != nil { - if err == ErrNotFound { + if errors.Is(err, ErrNotFound) { return nil, "", nil } @@ -60,14 +60,14 @@ func buildRateLimitMiddleware(annotations map[string]string) (middleware *dynami ) rateLimitBurst, err = GetRateLimitBurst(annotations) - if err == ErrNotFound { + if errors.Is(err, ErrNotFound) { return nil, "", nil } else if err != nil { return nil, "", fmt.Errorf("unable to build rate-limit middleware: %w", err) } rateLimitAverage, err = GetRateLimitAverage(annotations) - if err == ErrNotFound { + if errors.Is(err, ErrNotFound) { return nil, "", nil } else if err != nil { return nil, "", fmt.Errorf("unable to build rate-limit middleware: %w", err) @@ -93,7 +93,7 @@ func buildCircuitBreakerMiddleware(annotations map[string]string) (middleware *d circuitBreakerExpression, err = GetCircuitBreakerExpression(annotations) if err != nil { - if err == ErrNotFound { + if errors.Is(err, ErrNotFound) { return nil, "", nil } diff --git a/pkg/k8s/client.go b/pkg/k8s/client.go index 3b78ecdc7..c97c2ea49 100644 --- a/pkg/k8s/client.go +++ b/pkg/k8s/client.go @@ -109,7 +109,7 @@ func buildKubernetesClient(log logrus.FieldLogger, config *rest.Config) (*kubern client, err := kubernetes.NewForConfig(config) 
if err != nil { - return nil, fmt.Errorf("unable to create kubernetes client: %v", err) + return nil, fmt.Errorf("unable to create kubernetes client: %w", err) } return client, nil @@ -121,7 +121,7 @@ func buildSmiAccessClient(log logrus.FieldLogger, config *rest.Config) (*accessc client, err := accessclient.NewForConfig(config) if err != nil { - return nil, fmt.Errorf("unable to create SMI Access Client: %v", err) + return nil, fmt.Errorf("unable to create SMI Access Client: %w", err) } return client, nil @@ -133,7 +133,7 @@ func buildSmiSpecsClient(log logrus.FieldLogger, config *rest.Config) (*specscli client, err := specsclient.NewForConfig(config) if err != nil { - return nil, fmt.Errorf("unable to create SMI Specs Client: %v", err) + return nil, fmt.Errorf("unable to create SMI Specs Client: %w", err) } return client, nil @@ -145,7 +145,7 @@ func buildSmiSplitClient(log logrus.FieldLogger, config *rest.Config) (*splitcli client, err := splitclient.NewForConfig(config) if err != nil { - return nil, fmt.Errorf("unable to create SMI Split Client: %v", err) + return nil, fmt.Errorf("unable to create SMI Split Client: %w", err) } return client, nil diff --git a/pkg/provider/provider.go b/pkg/provider/provider.go index d30a27c0f..532ff2f99 100644 --- a/pkg/provider/provider.go +++ b/pkg/provider/provider.go @@ -112,7 +112,7 @@ func (p *Provider) BuildConfig(t *topology.Topology) *dynamic.Configuration { for svcKey, svc := range t.Services { if err := p.buildConfigForService(t, cfg, svc); err != nil { - err = fmt.Errorf("unable to build configuration: %v", err) + err = fmt.Errorf("unable to build configuration: %w", err) svc.AddError(err) p.logger.Errorf("Error building dynamic configuration for Service %q: %v", svcKey, err) } @@ -155,7 +155,7 @@ func (p *Provider) buildConfigForService(t *topology.Topology, cfg *dynamic.Conf for _, tsKey := range svc.TrafficSplits { if err := p.buildServiceAndRoutersForTrafficSplit(t, cfg, tsKey, scheme, trafficType, 
middlewareKeys); err != nil { - err = fmt.Errorf("unable to build routers and services : %v", err) + err = fmt.Errorf("unable to build routers and services : %w", err) t.TrafficSplits[tsKey].AddError(err) p.logger.Errorf("Error building dynamic configuration for TrafficSplit %q: %v", tsKey, err) @@ -200,7 +200,7 @@ func (p *Provider) buildACLConfigRoutersAndServices(t *topology.Topology, cfg *d for _, ttKey := range svc.TrafficTargets { if err := p.buildServicesAndRoutersForTrafficTarget(t, cfg, ttKey, scheme, trafficType, middlewareKeys); err != nil { - err = fmt.Errorf("unable to build routers and services: %v", err) + err = fmt.Errorf("unable to build routers and services: %w", err) t.ServiceTrafficTargets[ttKey].AddError(err) p.logger.Errorf("Error building dynamic configuration for TrafficTarget %q: %v", ttKey, err) @@ -235,7 +235,7 @@ func (p *Provider) buildServicesAndRoutersForHTTPService(t *topology.Topology, c for portID, svcPort := range svc.Ports { entrypoint, err := p.buildHTTPEntrypoint(portID) if err != nil { - err = fmt.Errorf("unable to build HTTP entrypoint for port %d: %v", svcPort.Port, err) + err = fmt.Errorf("unable to build HTTP entrypoint for port %d: %w", svcPort.Port, err) svc.AddError(err) p.logger.Errorf("Error building dynamic configuration for Service %q: %v", svcKey, err) @@ -255,7 +255,7 @@ func (p *Provider) buildServicesAndRoutersForTCPService(t *topology.Topology, cf for _, svcPort := range svc.Ports { entrypoint, err := p.buildTCPEntrypoint(svc, svcPort.Port) if err != nil { - err = fmt.Errorf("unable to build TCP entrypoint for port %d: %v", svcPort.Port, err) + err = fmt.Errorf("unable to build TCP entrypoint for port %d: %w", svcPort.Port, err) svc.AddError(err) p.logger.Errorf("Error building dynamic configuration for Service %q: %v", svcKey, err) @@ -273,7 +273,7 @@ func (p *Provider) buildServicesAndRoutersForUDPService(t *topology.Topology, cf for _, svcPort := range svc.Ports { entrypoint, err := p.buildUDPEntrypoint(svc, 
svcPort.Port) if err != nil { - err = fmt.Errorf("unable to build UDP entrypoint for port %d: %v", svcPort.Port, err) + err = fmt.Errorf("unable to build UDP entrypoint for port %d: %w", svcPort.Port, err) svc.AddError(err) p.logger.Errorf("Error building dynamic configuration for Service %q: %v", svcKey, err) @@ -321,7 +321,7 @@ func (p *Provider) buildHTTPServicesAndRoutersForTrafficTarget(t *topology.Topol for portID, svcPort := range tt.Destination.Ports { entrypoint, err := p.buildHTTPEntrypoint(portID) if err != nil { - err = fmt.Errorf("unable to build HTTP entrypoint for port %d: %v", svcPort.Port, err) + err = fmt.Errorf("unable to build HTTP entrypoint for port %d: %w", svcPort.Port, err) tt.AddError(err) p.logger.Errorf("Error building dynamic configuration for TrafficTarget %q: %v", ttKey, err) @@ -362,7 +362,7 @@ func (p *Provider) buildTCPServicesAndRoutersForTrafficTarget(t *topology.Topolo for _, svcPort := range tt.Destination.Ports { entrypoint, err := p.buildTCPEntrypoint(ttSvc, svcPort.Port) if err != nil { - err = fmt.Errorf("unable to build TCP entrypoint for port %d: %v", svcPort.Port, err) + err = fmt.Errorf("unable to build TCP entrypoint for port %d: %w", svcPort.Port, err) tt.AddError(err) p.logger.Errorf("Error building dynamic configuration for TrafficTarget %q: %v", ttKey, err) @@ -420,7 +420,7 @@ func (p *Provider) buildHTTPServiceAndRoutersForTrafficSplit(t *topology.Topolog for portID, svcPort := range tsSvc.Ports { backendSvcs, err := p.buildServicesForTrafficSplitBackends(t, cfg, ts, svcPort, scheme) if err != nil { - err = fmt.Errorf("unable to build HTTP backend services and port %d: %v", svcPort.Port, err) + err = fmt.Errorf("unable to build HTTP backend services and port %d: %w", svcPort.Port, err) ts.AddError(err) p.logger.Errorf("Error building dynamic configuration for TrafficSplit %q: %v", tsKey, err) @@ -429,7 +429,7 @@ func (p *Provider) buildHTTPServiceAndRoutersForTrafficSplit(t *topology.Topolog entrypoint, err := 
p.buildHTTPEntrypoint(portID) if err != nil { - err = fmt.Errorf("unable to build HTTP entrypoint for port %d: %v", svcPort.Port, err) + err = fmt.Errorf("unable to build HTTP entrypoint for port %d: %w", svcPort.Port, err) ts.AddError(err) p.logger.Errorf("Error building dynamic configuration for TrafficSplit %q: %v", tsKey, err) @@ -464,7 +464,7 @@ func (p *Provider) buildTCPServiceAndRoutersForTrafficSplit(cfg *dynamic.Configu for _, svcPort := range tsSvc.Ports { entrypoint, err := p.buildTCPEntrypoint(tsSvc, svcPort.Port) if err != nil { - err = fmt.Errorf("unable to build TCP entrypoint for port %d: %v", svcPort.Port, err) + err = fmt.Errorf("unable to build TCP entrypoint for port %d: %w", svcPort.Port, err) ts.AddError(err) p.logger.Errorf("Error building dynamic configuration for TrafficSplit %q: %v", tsKey, err) @@ -495,7 +495,7 @@ func (p *Provider) buildUDPServiceAndRoutersForTrafficSplit(cfg *dynamic.Configu for _, svcPort := range tsSvc.Ports { entrypoint, err := p.buildUDPEntrypoint(tsSvc, svcPort.Port) if err != nil { - err = fmt.Errorf("unable to build UDP entrypoint for port %d: %v", svcPort.Port, err) + err = fmt.Errorf("unable to build UDP entrypoint for port %d: %w", svcPort.Port, err) ts.AddError(err) p.logger.Errorf("Error building dynamic configuration for TrafficSplit %q: %v", tsKey, err) diff --git a/pkg/topology/builder.go b/pkg/topology/builder.go index 0be262e5b..88c88d47f 100644 --- a/pkg/topology/builder.go +++ b/pkg/topology/builder.go @@ -139,7 +139,7 @@ func (b *Builder) evaluateTrafficTarget(res *resources, topology *Topology, tt * stt.Rules, err = b.buildTrafficTargetRules(res, tt) if err != nil { - err = fmt.Errorf("unable to build spec: %v", err) + err = fmt.Errorf("unable to build spec: %w", err) stt.AddError(err) b.logger.Errorf("Error building topology for TrafficTarget %q: %v", Key{tt.Name, tt.Namespace}, err) @@ -226,7 +226,7 @@ func (b *Builder) evaluateTrafficSplit(res *resources, topology *Topology, traff ts.Rules, err 
= b.buildTrafficSplitSpecs(res, trafficSplit) if err != nil { - err = fmt.Errorf("unable to build spec: %v", err) + err = fmt.Errorf("unable to build spec: %w", err) ts.AddError(err) b.logger.Errorf("Error building topology for TrafficTarget %q: %v", tsKey, err) @@ -311,7 +311,7 @@ func (b *Builder) populateTrafficSplitsAuthorizedIncomingTraffic(topology *Topol if err != nil { loopCausingTrafficSplitsByService[svc] = append(loopCausingTrafficSplitsByService[svc], tsKey) - err = fmt.Errorf("unable to get incoming pods: %v", err) + err = fmt.Errorf("unable to get incoming pods: %w", err) ts.AddError(err) b.logger.Errorf("Error building topology for TrafficSplit %q: %v", tsKey, err)