From 42720b8bea358ba707b38e2e3d841157e7b798dc Mon Sep 17 00:00:00 2001 From: Rod Vagg Date: Tue, 7 May 2024 12:08:28 +1000 Subject: [PATCH] chore: lint: update linter settings, fix lint errors Ref: https://github.com/filecoin-project/lotus/issues/11967 --- .golangci.yml | 23 +----- api/api_full.go | 8 +-- api/api_gateway.go | 4 +- api/api_storage.go | 2 +- api/api_test.go | 2 +- api/client/client.go | 20 +++--- api/types.go | 2 + blockstore/api.go | 6 +- blockstore/buffered.go | 6 +- blockstore/mem.go | 2 +- blockstore/net.go | 2 + blockstore/net_ws.go | 6 +- blockstore/sync.go | 2 +- build/builtin_actors_gen.go | 2 +- build/params_mainnet.go | 2 + build/params_shared_vals.go | 3 + chain/actors/aerrors/error_test.go | 24 +++---- chain/actors/aerrors/wrap.go | 2 +- chain/actors/manifest.go | 4 +- chain/beacon/mock.go | 4 +- chain/events/filter/index.go | 18 ++--- chain/events/filter/tipset.go | 2 +- chain/events/state/fastapi.go | 1 + chain/events/state/predicates_test.go | 4 +- chain/exchange/peer_tracker.go | 2 +- chain/gen/slashfilter/slashfilter.go | 2 +- .../gen/slashfilter/slashsvc/slashservice.go | 1 + chain/state/statetree_test.go | 22 +++--- chain/store/messages.go | 8 +-- chain/store/store.go | 3 +- chain/types/ethtypes/eth_transactions.go | 16 ++--- chain/types/ethtypes/eth_types.go | 4 +- chain/types/execresult.go | 1 + chain/wallet/wallet.go | 2 +- cmd/lotus-sim/simulation/mock/mock.go | 2 +- conformance/chaos/actor.go | 3 +- curiosrc/gc/storage_endpoint_gc.go | 6 +- gen/bundle/bundle.go | 2 +- genesis/types.go | 2 + journal/registry_test.go | 2 +- lib/harmony/harmonydb/harmonydb.go | 6 +- .../resources/miniopencl/mini_opencl.go | 16 ++--- lib/harmony/resources/resources.go | 7 +- lib/httpreader/httpreader.go | 2 + lib/parmap/parmap.go | 2 +- lib/rpcenc/reader.go | 4 +- lib/rpcenc/reader_test.go | 34 ++++----- lib/shardedmutex/shardedmutex_test.go | 2 +- lib/ulimit/ulimit_test.go | 4 +- markets/dagstore/miner_api.go | 4 +- 
markets/dagstore/miner_api_test.go | 4 +- markets/dagstore/wrapper.go | 6 +- markets/dagstore/wrapper_test.go | 6 +- markets/idxprov/idxprov_test/noop.go | 2 +- markets/idxprov/mesh.go | 12 ++-- node/config/types.go | 2 + node/modules/lp2p/discovery.go | 1 + node/modules/lp2p/host.go | 2 +- node/modules/lp2p/pubsub.go | 8 +-- node/modules/lp2p/rcmgr.go | 10 +-- node/modules/lp2p/routing.go | 2 +- .../modules/tracer/elasticsearch_transport.go | 10 +-- node/repo/blockstore_opts.go | 2 +- node/repo/memrepo.go | 4 +- storage/paths/db_index.go | 70 +++++++++---------- storage/paths/local.go | 22 +++--- storage/paths/local_test.go | 4 +- storage/pipeline/piece/piece_info.go | 4 +- storage/sealer/commitment/commr.go | 10 +-- storage/sealer/ffiwrapper/basicfs/fs.go | 2 +- storage/sealer/fr32/fr32_test.go | 2 +- storage/sealer/manager.go | 4 +- storage/sealer/manager_post.go | 2 +- storage/sealer/manager_test.go | 4 +- storage/sealer/mock/mock.go | 4 +- storage/sealer/sched.go | 9 +-- storage/sealer/sched_post.go | 6 +- storage/sealer/sched_test.go | 4 +- storage/sealer/sched_worker.go | 10 +-- storage/sealer/sched_worker_cache.go | 4 +- storage/sealer/selector_move.go | 2 +- storage/sealer/selector_task.go | 2 +- storage/sealer/stats.go | 2 +- storage/sealer/storiface/resources_test.go | 10 +-- storage/sealer/storiface/storage.go | 2 + storage/sealer/testworker_test.go | 2 +- storage/sealer/worker_local.go | 6 +- tools/stats/ipldstore/ipldstore.go | 8 ++- 88 files changed, 292 insertions(+), 284 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 1d455e52559..cf9e2498a81 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -6,14 +6,12 @@ linters: - goimports - misspell - goconst - - golint + - revive - errcheck - gosec - unconvert - staticcheck - - varcheck - - deadcode - - scopelint + - exportloopref - unused # We don't want to skip builtin/ @@ -44,18 +42,6 @@ issues: exclude-use-default: false exclude-rules: - - path: node/modules/lp2p - linters: - - golint - - - 
path: build/params_.*\.go - linters: - - golint - - - path: api/apistruct/struct.go - linters: - - golint - - path: .*_test.go linters: - gosec @@ -68,11 +54,6 @@ issues: linters: - gosec - - path: api/test/.* - text: "context.Context should be the first parameter" - linters: - - golint - linters-settings: goconst: min-occurrences: 6 diff --git a/api/api_full.go b/api/api_full.go index bbfcae0a2eb..7efb01214b4 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -552,17 +552,17 @@ type FullNode interface { StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error) //perm:read // StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal. Returns nil if // pending allocation is not found. - StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read + StateGetAllocationForPendingDeal(ctx context.Context, dealID abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read // StateGetAllocationIdForPendingDeal is like StateGetAllocationForPendingDeal except it returns the allocation ID - StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifreg.AllocationId, error) //perm:read + StateGetAllocationIdForPendingDeal(ctx context.Context, dealID abi.DealID, tsk types.TipSetKey) (verifreg.AllocationId, error) //perm:read // StateGetAllocation returns the allocation for a given address and allocation ID. - StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationID verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read // StateGetAllocations returns the all the allocations for a given client. 
StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read // StateGetAllAllocations returns the all the allocations available in verified registry actor. StateGetAllAllocations(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read // StateGetClaim returns the claim for a given address and claim ID. - StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read + StateGetClaim(ctx context.Context, providerAddr address.Address, claimID verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read // StateGetClaims returns the all the claims for a given provider. StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read // StateGetAllClaims returns the all the claims available in verified registry actor. 
diff --git a/api/api_gateway.go b/api/api_gateway.go index e70c00e3a26..2e08e436860 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -67,9 +67,9 @@ type Gateway interface { StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) - StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationID verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) - StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) + StateGetClaim(ctx context.Context, providerAddr address.Address, claimID verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) diff --git a/api/api_storage.go b/api/api_storage.go index 410fa2af16c..4e4ca94eb3f 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -179,7 +179,7 @@ type StorageMiner interface { SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin SealingAbort(ctx context.Context, call 
storiface.CallID) error //perm:admin // SealingSchedRemove removes a request from sealing pipeline - SealingRemoveRequest(ctx context.Context, schedId uuid.UUID) error //perm:admin + SealingRemoveRequest(ctx context.Context, schedID uuid.UUID) error //perm:admin // paths.SectorIndex StorageAttach(context.Context, storiface.StorageInfo, fsutil.FsStat) error //perm:admin diff --git a/api/api_test.go b/api/api_test.go index 1316d9fa4ca..91c644115f4 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -121,7 +121,7 @@ func TestReturnTypes(t *testing.T) { t.Run("worker", tst(new(Worker))) } -func TestPermTags(t *testing.T) { +func TestPermTags(_ *testing.T) { //stm: @OTHER_IMPLEMENTATION_PERM_TAGS_001 _ = PermissionedFullAPI(&FullNodeStruct{}) _ = PermissionedStorMinerAPI(&StorageMinerStruct{}) diff --git a/api/client/client.go b/api/client/client.go index 4a8ff927227..b1e2447eb43 100644 --- a/api/client/client.go +++ b/api/client/client.go @@ -54,25 +54,25 @@ func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Heade } func getPushUrl(addr string) (string, error) { - pushUrl, err := url.Parse(addr) + pushURL, err := url.Parse(addr) if err != nil { return "", err } - switch pushUrl.Scheme { + switch pushURL.Scheme { case "ws": - pushUrl.Scheme = "http" + pushURL.Scheme = "http" case "wss": - pushUrl.Scheme = "https" + pushURL.Scheme = "https" } ///rpc/v0 -> /rpc/streams/v0/push - pushUrl.Path = path.Join(pushUrl.Path, "../streams/v0/push") - return pushUrl.String(), nil + pushURL.Path = path.Join(pushURL.Path, "../streams/v0/push") + return pushURL.String(), nil } // NewStorageMinerRPCV0 creates a new http jsonrpc client for miner func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.StorageMiner, jsonrpc.ClientCloser, error) { - pushUrl, err := getPushUrl(addr) + pushURL, err := getPushUrl(addr) if err != nil { return nil, nil, err } @@ -81,7 +81,7 @@ func NewStorageMinerRPCV0(ctx 
context.Context, addr string, requestHeader http.H closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", api.GetInternalStructs(&res), requestHeader, append([]jsonrpc.Option{ - rpcenc.ReaderParamEncoder(pushUrl), + rpcenc.ReaderParamEncoder(pushURL), jsonrpc.WithErrors(api.RPCErrors), }, opts...)...) @@ -89,7 +89,7 @@ func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.H } func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.Worker, jsonrpc.ClientCloser, error) { - pushUrl, err := getPushUrl(addr) + pushURL, err := getPushUrl(addr) if err != nil { return nil, nil, err } @@ -98,7 +98,7 @@ func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", api.GetInternalStructs(&res), requestHeader, - rpcenc.ReaderParamEncoder(pushUrl), + rpcenc.ReaderParamEncoder(pushURL), jsonrpc.WithNoReconnect(), jsonrpc.WithTimeout(30*time.Second), jsonrpc.WithErrors(api.RPCErrors), diff --git a/api/types.go b/api/types.go index b7dbe7b3625..59317610cd0 100644 --- a/api/types.go +++ b/api/types.go @@ -1,3 +1,5 @@ +// revive:disable var-naming + package api import ( diff --git a/blockstore/api.go b/blockstore/api.go index 090f53e5a99..97868e87a5b 100644 --- a/blockstore/api.go +++ b/blockstore/api.go @@ -64,10 +64,8 @@ func (a *apiBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) erro return nil } -func (a *apiBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { +func (a *apiBlockstore) AllKeysChan(_ context.Context) (<-chan cid.Cid, error) { return nil, xerrors.New("not supported") } -func (a *apiBlockstore) HashOnRead(enabled bool) { - return -} +func (a *apiBlockstore) HashOnRead(_ bool) {} diff --git a/blockstore/buffered.go b/blockstore/buffered.go index 2a789b6371a..9cfc12e65fb 100644 --- a/blockstore/buffered.go +++ b/blockstore/buffered.go @@ -109,11 +109,9 @@ func (bs *BufferedBlockstore) 
DeleteMany(ctx context.Context, cids []cid.Cid) er func (bs *BufferedBlockstore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error { // both stores are viewable. - if err := bs.write.View(ctx, c, callback); ipld.IsNotFound(err) { - // not found in write blockstore; fall through. - } else { + if err := bs.write.View(ctx, c, callback); !ipld.IsNotFound(err) { return err // propagate errors, or nil, i.e. found. - } + } // else not found in write blockstore; fall through. return bs.read.View(ctx, c, callback) } diff --git a/blockstore/mem.go b/blockstore/mem.go index 8dbf4b719ad..f3e2c8ef849 100644 --- a/blockstore/mem.go +++ b/blockstore/mem.go @@ -104,6 +104,6 @@ func (m MemBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) // HashOnRead specifies if every read block should be // rehashed to make sure it matches its CID. -func (m MemBlockstore) HashOnRead(enabled bool) { +func (m MemBlockstore) HashOnRead(_ bool) { // no-op } diff --git a/blockstore/net.go b/blockstore/net.go index c4a88cfc970..6283af2b759 100644 --- a/blockstore/net.go +++ b/blockstore/net.go @@ -1,3 +1,5 @@ +// revive:disable var-naming + package blockstore import ( diff --git a/blockstore/net_ws.go b/blockstore/net_ws.go index 5c9a70d8435..e6a76a159d7 100644 --- a/blockstore/net_ws.go +++ b/blockstore/net_ws.go @@ -15,7 +15,7 @@ type wsWrapper struct { nextMsg []byte } -func (w *wsWrapper) Read(b []byte) (int, error) { +func (w *wsWrapper) Read(_ []byte) (int, error) { return 0, xerrors.New("read unsupported") } @@ -47,7 +47,7 @@ func (w *wsWrapper) ReadMsg() ([]byte, error) { return mbuf.Bytes(), nil } -func (w *wsWrapper) ReleaseMsg(bytes []byte) { +func (w *wsWrapper) ReleaseMsg(_ []byte) { // todo use a pool } @@ -71,7 +71,7 @@ func (w *wsWrapper) NextMsgLen() (int, error) { return len(w.nextMsg), nil } -func (w *wsWrapper) Write(bytes []byte) (int, error) { +func (w *wsWrapper) Write(_ []byte) (int, error) { return 0, xerrors.New("write unsupported") } 
diff --git a/blockstore/sync.go b/blockstore/sync.go index 4f0cf830ee6..43830f204e6 100644 --- a/blockstore/sync.go +++ b/blockstore/sync.go @@ -78,6 +78,6 @@ func (m *SyncBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error return m.bs.AllKeysChan(ctx) } -func (m *SyncBlockstore) HashOnRead(enabled bool) { +func (m *SyncBlockstore) HashOnRead(_ bool) { // noop } diff --git a/build/builtin_actors_gen.go b/build/builtin_actors_gen.go index 4d2a6674c57..6d046306712 100644 --- a/build/builtin_actors_gen.go +++ b/build/builtin_actors_gen.go @@ -6,7 +6,7 @@ import ( "github.com/ipfs/go-cid" ) -var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMetadata{{ +var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{ Network: "butterflynet", Version: 8, diff --git a/build/params_mainnet.go b/build/params_mainnet.go index 5831e513782..056a568314b 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -1,6 +1,8 @@ //go:build !debug && !2k && !testground && !calibnet && !butterflynet && !interopnet // +build !debug,!2k,!testground,!calibnet,!butterflynet,!interopnet +// revive:disable:var-naming + package build import ( diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 8a3f6550124..b4624db7d92 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -1,6 +1,8 @@ //go:build !testground // +build !testground +// revive:disable:var-naming + package build import ( @@ -124,6 +126,7 @@ const MinimumBaseFee = 100 const PackingEfficiencyNum = 4 const PackingEfficiencyDenom = 5 +// revive:disable-next-line:exported // Actor consts // TODO: pieceSize unused from actors var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0) diff --git a/chain/actors/aerrors/error_test.go b/chain/actors/aerrors/error_test.go index 8c3738c8816..146c305a31b 100644 --- a/chain/actors/aerrors/error_test.go +++ b/chain/actors/aerrors/error_test.go @@ -9,7 +9,7 @@ import ( 
"github.com/filecoin-project/go-state-types/exitcode" - . "github.com/filecoin-project/lotus/chain/actors/aerrors" + "github.com/filecoin-project/lotus/chain/actors/aerrors" ) func TestFatalError(t *testing.T) { @@ -17,24 +17,24 @@ func TestFatalError(t *testing.T) { e1 := xerrors.New("out of disk space") e2 := xerrors.Errorf("could not put node: %w", e1) e3 := xerrors.Errorf("could not save head: %w", e2) - ae := Escalate(e3, "failed to save the head") - aw1 := Wrap(ae, "saving head of new miner actor") - aw2 := Absorb(aw1, 1, "try to absorb fatal error") - aw3 := Wrap(aw2, "initializing actor") - aw4 := Wrap(aw3, "creating miner in storage market") + ae := aerrors.Escalate(e3, "failed to save the head") + aw1 := aerrors.Wrap(ae, "saving head of new miner actor") + aw2 := aerrors.Absorb(aw1, 1, "try to absorb fatal error") + aw3 := aerrors.Wrap(aw2, "initializing actor") + aw4 := aerrors.Wrap(aw3, "creating miner in storage market") t.Logf("Verbose error: %+v", aw4) t.Logf("Normal error: %v", aw4) - assert.True(t, IsFatal(aw4), "should be fatal") + assert.True(t, aerrors.IsFatal(aw4), "should be fatal") } func TestAbsorbeError(t *testing.T) { //stm: @OTHER_IMPLEMENTATION_ACTOR_ERRORS_001 e1 := xerrors.New("EOF") e2 := xerrors.Errorf("could not decode: %w", e1) - ae := Absorb(e2, 35, "failed to decode CBOR") - aw1 := Wrap(ae, "saving head of new miner actor") - aw2 := Wrap(aw1, "initializing actor") - aw3 := Wrap(aw2, "creating miner in storage market") + ae := aerrors.Absorb(e2, 35, "failed to decode CBOR") + aw1 := aerrors.Wrap(ae, "saving head of new miner actor") + aw2 := aerrors.Wrap(aw1, "initializing actor") + aw3 := aerrors.Wrap(aw2, "creating miner in storage market") t.Logf("Verbose error: %+v", aw3) t.Logf("Normal error: %v", aw3) - assert.Equal(t, exitcode.ExitCode(35), RetCode(aw3)) + assert.Equal(t, exitcode.ExitCode(35), aerrors.RetCode(aw3)) } diff --git a/chain/actors/aerrors/wrap.go b/chain/actors/aerrors/wrap.go index 6bf8a30090c..03281d7fcff 
100644 --- a/chain/actors/aerrors/wrap.go +++ b/chain/actors/aerrors/wrap.go @@ -71,7 +71,7 @@ func NewfSkip(skip int, retCode exitcode.ExitCode, format string, args ...interf } } -func Fatal(message string, args ...interface{}) ActorError { +func Fatal(message string, _ ...interface{}) ActorError { return &actorError{ fatal: true, msg: message, diff --git a/chain/actors/manifest.go b/chain/actors/manifest.go index 62c17193a57..4124f826c9a 100644 --- a/chain/actors/manifest.go +++ b/chain/actors/manifest.go @@ -51,10 +51,10 @@ func RegisterManifest(av actorstypes.Version, manifestCid cid.Cid, entries map[s } } -func AddActorMeta(name string, codeId cid.Cid, av actorstypes.Version) { +func AddActorMeta(name string, codeID cid.Cid, av actorstypes.Version) { manifestMx.Lock() defer manifestMx.Unlock() - actorMeta[codeId] = actorEntry{name: name, version: av} + actorMeta[codeID] = actorEntry{name: name, version: av} } // GetManifest gets a loaded manifest. diff --git a/chain/beacon/mock.go b/chain/beacon/mock.go index ab6a98ebfe9..5b9c9814a6a 100644 --- a/chain/beacon/mock.go +++ b/chain/beacon/mock.go @@ -51,7 +51,7 @@ func (mb *mockBeacon) Entry(ctx context.Context, index uint64) <-chan Response { return out } -func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, _prevEntrySig []byte) error { +func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, _ []byte) error { // TODO: cache this, especially for bls oe := mb.entryForIndex(from.Round) if !bytes.Equal(from.Data, oe.Data) { @@ -60,7 +60,7 @@ func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, _prevEntrySig []byte) return nil } -func (mb *mockBeacon) MaxBeaconRoundForEpoch(nv network.Version, epoch abi.ChainEpoch) uint64 { +func (mb *mockBeacon) MaxBeaconRoundForEpoch(_ network.Version, epoch abi.ChainEpoch) uint64 { // offset for better testing return uint64(epoch + 100) } diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go index 9bf7213c82f..537f37cb9c4 100644 --- 
a/chain/events/filter/index.go +++ b/chain/events/filter/index.go @@ -61,7 +61,7 @@ var ddls = []string{ createIndexEventEntryIndexedKey, createIndexEventEntryCodecValue, - createIndexEventEntryEventId, + createIndexEventEntryEventID, // metadata containing version of schema `CREATE TABLE IF NOT EXISTS _meta ( @@ -95,7 +95,7 @@ const ( createIndexEventEntryIndexedKey = `CREATE INDEX IF NOT EXISTS event_entry_indexed_key ON event_entry (indexed, key);` createIndexEventEntryCodecValue = `CREATE INDEX IF NOT EXISTS event_entry_codec_value ON event_entry (codec, value);` - createIndexEventEntryEventId = `CREATE INDEX IF NOT EXISTS event_entry_event_id ON event_entry(event_id);` + createIndexEventEntryEventID = `CREATE INDEX IF NOT EXISTS event_entry_event_id ON event_entry(event_id);` ) type EventIndex struct { @@ -209,9 +209,9 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C return xerrors.Errorf("delete off chain event: %w", err) } - // find the first eventId from the last time the tipset was applied - var eventId sql.NullInt64 - err = stmtSelectEvent.QueryRow(tsKeyCid.Bytes()).Scan(&eventId) + // find the first eventID from the last time the tipset was applied + var eventID sql.NullInt64 + err = stmtSelectEvent.QueryRow(tsKeyCid.Bytes()).Scan(&eventID) if err != nil { if errors.Is(err, sql.ErrNoRows) { continue @@ -220,12 +220,12 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C } // this tipset might not have any events which is ok - if !eventId.Valid { + if !eventID.Valid { continue } - log.Debugf("Deleting all events with id < %d at height %d", eventId.Int64, currTs.Height()) + log.Debugf("Deleting all events with id < %d at height %d", eventID.Int64, currTs.Height()) - res, err := stmtDeleteEvent.Exec(tsKeyCid.Bytes(), eventId.Int64) + res, err := stmtDeleteEvent.Exec(tsKeyCid.Bytes(), eventID.Int64) if err != nil { return xerrors.Errorf("delete event: %w", err) } @@ -349,7 +349,7 @@ func (ei 
*EventIndex) migrateToVersion4(ctx context.Context) error { {"create index event_reverted", createIndexEventReverted}, {"create index event_entry_indexed_key", createIndexEventEntryIndexedKey}, {"create index event_entry_codec_value", createIndexEventEntryCodecValue}, - {"create index event_entry_event_id", createIndexEventEntryEventId}, + {"create index event_entry_event_id", createIndexEventEntryEventID}, } { _, err = tx.ExecContext(ctx, create.query) if err != nil { diff --git a/chain/events/filter/tipset.go b/chain/events/filter/tipset.go index be734c6f74f..aa8f11a3e4c 100644 --- a/chain/events/filter/tipset.go +++ b/chain/events/filter/tipset.go @@ -79,7 +79,7 @@ type TipSetFilterManager struct { filters map[types.FilterID]*TipSetFilter } -func (m *TipSetFilterManager) Apply(ctx context.Context, from, to *types.TipSet) error { +func (m *TipSetFilterManager) Apply(ctx context.Context, _, to *types.TipSet) error { m.mu.Lock() defer m.mu.Unlock() if len(m.filters) == 0 { diff --git a/chain/events/state/fastapi.go b/chain/events/state/fastapi.go index 9375d9d7846..147cf3585d1 100644 --- a/chain/events/state/fastapi.go +++ b/chain/events/state/fastapi.go @@ -8,6 +8,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) +// revive:disable-next-line:var-naming type FastChainApiAPI interface { ChainAPI diff --git a/chain/events/state/predicates_test.go b/chain/events/state/predicates_test.go index 79c1d2e0eae..b1fe6faccb9 100644 --- a/chain/events/state/predicates_test.go +++ b/chain/events/state/predicates_test.go @@ -158,8 +158,8 @@ func TestMarketPredicates(t *testing.T) { t.Run("deal ID predicate", func(t *testing.T) { preds := NewStatePredicates(api) - dealIds := []abi.DealID{abi.DealID(1), abi.DealID(2)} - diffIDFn := preds.OnStorageMarketActorChanged(preds.OnDealStateChanged(preds.DealStateChangedForIDs(dealIds))) + dealIDs := []abi.DealID{abi.DealID(1), abi.DealID(2)} + diffIDFn := 
preds.OnStorageMarketActorChanged(preds.OnDealStateChanged(preds.DealStateChangedForIDs(dealIDs))) // Diff a state against itself: expect no change changed, _, err := diffIDFn(ctx, oldState.Key(), oldState.Key()) diff --git a/chain/exchange/peer_tracker.go b/chain/exchange/peer_tracker.go index 00b919d23be..3fe02933a0c 100644 --- a/chain/exchange/peer_tracker.go +++ b/chain/exchange/peer_tracker.go @@ -99,7 +99,7 @@ func (bpt *bsPeerTracker) prefSortedPeers() []peer.ID { var costI, costJ float64 - getPeerInitLat := func(p peer.ID) float64 { + getPeerInitLat := func(_ peer.ID) float64 { return float64(bpt.avgGlobalTime) * newPeerMul } diff --git a/chain/gen/slashfilter/slashfilter.go b/chain/gen/slashfilter/slashfilter.go index 71b5dad9ad9..25c70ba5828 100644 --- a/chain/gen/slashfilter/slashfilter.go +++ b/chain/gen/slashfilter/slashfilter.go @@ -99,7 +99,7 @@ func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, par return cid.Undef, false, nil } -func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) (cid.Cid, bool, error) { +func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, _ string) (cid.Cid, bool, error) { fault, err := t.Has(ctx, key) if err != nil { return cid.Undef, false, xerrors.Errorf("failed to read from datastore: %w", err) diff --git a/chain/gen/slashfilter/slashsvc/slashservice.go b/chain/gen/slashfilter/slashsvc/slashservice.go index 7a662288098..34d1011a1e6 100644 --- a/chain/gen/slashfilter/slashsvc/slashservice.go +++ b/chain/gen/slashfilter/slashsvc/slashservice.go @@ -23,6 +23,7 @@ import ( var log = logging.Logger("slashsvc") +// revive:disable-next-line:var-naming type ConsensusSlasherApi interface { ChainHead(context.Context) (*types.TipSet, error) ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) diff --git a/chain/state/statetree_test.go b/chain/state/statetree_test.go index 9a221751a75..3fe10845cd2 100644 --- 
a/chain/state/statetree_test.go +++ b/chain/state/statetree_test.go @@ -94,17 +94,17 @@ func TestResolveCache(t *testing.T) { if err != nil { t.Fatal(err) } - nonId := address.NewForTestGetter()() + nonID := address.NewForTestGetter()() id, _ := address.NewIDAddress(1000) st.lookupIDFun = func(a address.Address) (address.Address, error) { - if a == nonId { + if a == nonID { return id, nil } return address.Undef, types.ErrActorNotFound } - err = st.SetActor(nonId, &types.Actor{Nonce: 1}) + err = st.SetActor(nonID, &types.Actor{Nonce: 1}) if err != nil { t.Fatal(err) } @@ -114,19 +114,19 @@ func TestResolveCache(t *testing.T) { if err != nil { t.Fatal(err) } - act, err := st.GetActor(nonId) + act, err := st.GetActor(nonID) if err != nil { t.Fatal(err) } if act.Nonce != 1 { t.Fatalf("expected nonce 1, got %d", act.Nonce) } - err = st.SetActor(nonId, &types.Actor{Nonce: 2}) + err = st.SetActor(nonID, &types.Actor{Nonce: 2}) if err != nil { t.Fatal(err) } - act, err = st.GetActor(nonId) + act, err = st.GetActor(nonID) if err != nil { t.Fatal(err) } @@ -140,7 +140,7 @@ func TestResolveCache(t *testing.T) { st.ClearSnapshot() } - act, err := st.GetActor(nonId) + act, err := st.GetActor(nonID) if err != nil { t.Fatal(err) } @@ -153,19 +153,19 @@ func TestResolveCache(t *testing.T) { if err != nil { t.Fatal(err) } - act, err := st.GetActor(nonId) + act, err := st.GetActor(nonID) if err != nil { t.Fatal(err) } if act.Nonce != 1 { t.Fatalf("expected nonce 1, got %d", act.Nonce) } - err = st.SetActor(nonId, &types.Actor{Nonce: 2}) + err = st.SetActor(nonID, &types.Actor{Nonce: 2}) if err != nil { t.Fatal(err) } - act, err = st.GetActor(nonId) + act, err = st.GetActor(nonID) if err != nil { t.Fatal(err) } @@ -175,7 +175,7 @@ func TestResolveCache(t *testing.T) { st.ClearSnapshot() } - act, err = st.GetActor(nonId) + act, err = st.GetActor(nonID) if err != nil { t.Fatal(err) } diff --git a/chain/store/messages.go b/chain/store/messages.go index 4129a9199a5..5175483071d 100644 
--- a/chain/store/messages.go +++ b/chain/store/messages.go @@ -82,7 +82,7 @@ func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) { cids []cid.Cid cborCid cbg.CborCid ) - if err := a.ForEach(&cborCid, func(i int64) error { + if err := a.ForEach(&cborCid, func(_ int64) error { c := cid.Cid(cborCid) cids = append(cids, c) return nil @@ -114,11 +114,11 @@ func (cs *ChainStore) BlockMsgsForTipset(ctx context.Context, ts *types.TipSet) return nil, xerrors.Errorf("failed to load state tree at tipset %s: %w", ts, err) } - useIds := false + useIDs := false selectMsg := func(m *types.Message) (bool, error) { var sender address.Address if ts.Height() >= build.UpgradeHyperdriveHeight { - if useIds { + if useIDs { sender, err = st.LookupIDAddress(m.From) if err != nil { return false, xerrors.Errorf("failed to resolve sender: %w", err) @@ -129,7 +129,7 @@ func (cs *ChainStore) BlockMsgsForTipset(ctx context.Context, ts *types.TipSet) sender = m.From } else { // uh-oh, we actually have an ID-sender! - useIds = true + useIDs = true for robust, nonce := range applied { resolved, err := st.LookupIDAddress(robust) if err != nil { diff --git a/chain/store/store.go b/chain/store/store.go index b1431c2ee1a..3237bbe23ff 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -188,7 +188,7 @@ func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dsto return nil } - hcmetric := func(rev, app []*types.TipSet) error { + hcmetric := func(_, app []*types.TipSet) error { for _, r := range app { stats.Record(context.Background(), metrics.ChainNodeHeight.M(int64(r.Height()))) } @@ -305,6 +305,7 @@ func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange // Unsubscribe. cs.bestTips.Unsub(subch) + // revive:disable-next-line empty-block // Drain the channel. 
for range subch { } diff --git a/chain/types/ethtypes/eth_transactions.go b/chain/types/ethtypes/eth_transactions.go index a3b1d01502a..2ba341e4923 100644 --- a/chain/types/ethtypes/eth_transactions.go +++ b/chain/types/ethtypes/eth_transactions.go @@ -301,7 +301,7 @@ func (tx *EthTxArgs) ToRlpSignedMsg() ([]byte, error) { } func (tx *EthTxArgs) packTxFields() ([]interface{}, error) { - chainId, err := formatInt(tx.ChainID) + chainID, err := formatInt(tx.ChainID) if err != nil { return nil, err } @@ -332,7 +332,7 @@ func (tx *EthTxArgs) packTxFields() ([]interface{}, error) { } res := []interface{}{ - chainId, + chainID, nonce, maxPriorityFeePerGas, maxFeePerGas, @@ -428,22 +428,22 @@ func RecoverSignature(sig typescrypto.Signature) (r, s, v EthBigInt, err error) return EthBigIntZero, EthBigIntZero, EthBigIntZero, fmt.Errorf("signature should be 65 bytes long, but got %d bytes", len(sig.Data)) } - r_, err := parseBigInt(sig.Data[0:32]) + rI, err := parseBigInt(sig.Data[0:32]) if err != nil { return EthBigIntZero, EthBigIntZero, EthBigIntZero, fmt.Errorf("cannot parse r into EthBigInt") } - s_, err := parseBigInt(sig.Data[32:64]) + sI, err := parseBigInt(sig.Data[32:64]) if err != nil { return EthBigIntZero, EthBigIntZero, EthBigIntZero, fmt.Errorf("cannot parse s into EthBigInt") } - v_, err := parseBigInt([]byte{sig.Data[64]}) + vI, err := parseBigInt([]byte{sig.Data[64]}) if err != nil { return EthBigIntZero, EthBigIntZero, EthBigIntZero, fmt.Errorf("cannot parse v into EthBigInt") } - return EthBigInt(r_), EthBigInt(s_), EthBigInt(v_), nil + return EthBigInt(rI), EthBigInt(sI), EthBigInt(vI), nil } func parseEip1559Tx(data []byte) (*EthTxArgs, error) { @@ -464,7 +464,7 @@ func parseEip1559Tx(data []byte) (*EthTxArgs, error) { return nil, fmt.Errorf("not an EIP-1559 transaction: should have 12 elements in the rlp list") } - chainId, err := parseInt(decoded[0]) + chainID, err := parseInt(decoded[0]) if err != nil { return nil, err } @@ -532,7 +532,7 @@ func 
parseEip1559Tx(data []byte) (*EthTxArgs, error) { } args := EthTxArgs{ - ChainID: chainId, + ChainID: chainID, Nonce: nonce, To: to, MaxPriorityFeePerGas: maxPriorityFeePerGas, diff --git a/chain/types/ethtypes/eth_types.go b/chain/types/ethtypes/eth_types.go index 2740a3e9d25..090f9b06d5f 100644 --- a/chain/types/ethtypes/eth_types.go +++ b/chain/types/ethtypes/eth_types.go @@ -1,3 +1,5 @@ +// revive:disable:var-naming + package ethtypes import ( @@ -927,7 +929,7 @@ func NewEthBlockNumberOrHashFromNumber(number EthUint64) EthBlockNumberOrHash { func NewEthBlockNumberOrHashFromHexString(str string) (EthBlockNumberOrHash, error) { // check if block param is a number (decimal or hex) - var num EthUint64 = 0 + var num EthUint64 err := num.UnmarshalJSON([]byte(str)) if err != nil { return NewEthBlockNumberOrHashFromNumber(0), err diff --git a/chain/types/execresult.go b/chain/types/execresult.go index 99bbb6ece9a..76a4a9ba62a 100644 --- a/chain/types/execresult.go +++ b/chain/types/execresult.go @@ -29,6 +29,7 @@ type MessageTrace struct { } type ActorTrace struct { + // revive:disable-next-line:var-naming Id abi.ActorID State Actor } diff --git a/chain/wallet/wallet.go b/chain/wallet/wallet.go index 76af663c780..f966b9f6362 100644 --- a/chain/wallet/wallet.go +++ b/chain/wallet/wallet.go @@ -359,7 +359,7 @@ func (n nilDefault) GetDefault() (address.Address, error) { return address.Undef, nil } -func (n nilDefault) SetDefault(a address.Address) error { +func (n nilDefault) SetDefault(_ address.Address) error { return xerrors.Errorf("not supported; local wallet disabled") } diff --git a/cmd/lotus-sim/simulation/mock/mock.go b/cmd/lotus-sim/simulation/mock/mock.go index 002524070cc..3a2c72e33b4 100644 --- a/cmd/lotus-sim/simulation/mock/mock.go +++ b/cmd/lotus-sim/simulation/mock/mock.go @@ -72,7 +72,7 @@ func (mockVerifier) VerifyAggregateSeals(aggregate prooftypes.AggregateSealVerif } // TODO: do the thing -func (mockVerifier) VerifyReplicaUpdate(update 
prooftypes.ReplicaUpdateInfo) (bool, error) { +func (mockVerifier) VerifyReplicaUpdate(_ prooftypes.ReplicaUpdateInfo) (bool, error) { return false, nil } diff --git a/conformance/chaos/actor.go b/conformance/chaos/actor.go index 3a8b2b50af5..125e3a4347c 100644 --- a/conformance/chaos/actor.go +++ b/conformance/chaos/actor.go @@ -274,9 +274,8 @@ type AbortWithArgs struct { func (a Actor) AbortWith(rt runtime2.Runtime, args *AbortWithArgs) *abi.EmptyValue { if args.Uncontrolled { // uncontrolled abort: directly panic panic(args.Message) - } else { - rt.Abortf(args.Code, args.Message) } + rt.Abortf(args.Code, args.Message) return nil } diff --git a/curiosrc/gc/storage_endpoint_gc.go b/curiosrc/gc/storage_endpoint_gc.go index d49c51a1bb3..dcad7e24e22 100644 --- a/curiosrc/gc/storage_endpoint_gc.go +++ b/curiosrc/gc/storage_endpoint_gc.go @@ -39,7 +39,7 @@ func NewStorageEndpointGC(si *paths.DBIndex, remote *paths.Remote, db *harmonydb } } -func (s *StorageEndpointGC) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { +func (s *StorageEndpointGC) Do(_ harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { /* 1. Get all storage paths + urls (endpoints) 2. 
Ping each url, record results @@ -262,7 +262,7 @@ func (s *StorageEndpointGC) Do(taskID harmonytask.TaskID, stillOwned func() bool return true, nil } -func (s *StorageEndpointGC) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { +func (s *StorageEndpointGC) CanAccept(ids []harmonytask.TaskID, _ *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { id := ids[0] return &id, nil } @@ -280,7 +280,7 @@ func (s *StorageEndpointGC) TypeDetails() harmonytask.TaskTypeDetails { } } -func (s *StorageEndpointGC) Adder(taskFunc harmonytask.AddTaskFunc) { +func (s *StorageEndpointGC) Adder(_ harmonytask.AddTaskFunc) { // lazy endpoint, added when bored return } diff --git a/gen/bundle/bundle.go b/gen/bundle/bundle.go index f57ced15b43..64c13ec32c5 100644 --- a/gen/bundle/bundle.go +++ b/gen/bundle/bundle.go @@ -9,7 +9,7 @@ import ( "github.com/filecoin-project/lotus/build" ) -var tmpl *template.Template = template.Must(template.New("actor-metadata").Parse(` +var tmpl = template.Must(template.New("actor-metadata").Parse(` // WARNING: This file has automatically been generated package build diff --git a/genesis/types.go b/genesis/types.go index 815a2f0083a..0effe1da205 100644 --- a/genesis/types.go +++ b/genesis/types.go @@ -1,3 +1,5 @@ +// revive:disable:var-naming + package genesis import ( diff --git a/journal/registry_test.go b/journal/registry_test.go index 65956daf741..1b2466abee5 100644 --- a/journal/registry_test.go +++ b/journal/registry_test.go @@ -12,7 +12,7 @@ func TestDisabledEvents(t *testing.T) { req := require.New(t) test := func(dis DisabledEvents) func(*testing.T) { - return func(t *testing.T) { + return func(_ *testing.T) { registry := NewEventTypeRegistry(dis) reg1 := registry.RegisterEventType("system1", "disabled1") diff --git a/lib/harmony/harmonydb/harmonydb.go b/lib/harmony/harmonydb/harmonydb.go index 56b5acdfee2..10c3bc6d8c3 100644 --- a/lib/harmony/harmonydb/harmonydb.go +++ 
b/lib/harmony/harmonydb/harmonydb.go @@ -102,7 +102,7 @@ func New(hosts []string, username, password, database, port string, itestID ITes cfg.ConnConfig.Fallbacks = append(cfg.ConnConfig.Fallbacks, &pgconn.FallbackConfig{Host: h}) } - cfg.ConnConfig.OnNotice = func(conn *pgconn.PgConn, n *pgconn.Notice) { + cfg.ConnConfig.OnNotice = func(_ *pgconn.PgConn, n *pgconn.Notice) { logger.Debug("database notice: " + n.Message + ": " + n.Detail) DBMeasures.Errors.M(1) } @@ -123,10 +123,10 @@ type ctxkey string const SQL_START = ctxkey("sqlStart") const SQL_STRING = ctxkey("sqlString") -func (t tracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context { +func (t tracer) TraceQueryStart(ctx context.Context, _ *pgx.Conn, data pgx.TraceQueryStartData) context.Context { return context.WithValue(context.WithValue(ctx, SQL_START, time.Now()), SQL_STRING, data.SQL) } -func (t tracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) { +func (t tracer) TraceQueryEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceQueryEndData) { DBMeasures.Hits.M(1) ms := time.Since(ctx.Value(SQL_START).(time.Time)).Milliseconds() DBMeasures.TotalWait.M(ms) diff --git a/lib/harmony/resources/miniopencl/mini_opencl.go b/lib/harmony/resources/miniopencl/mini_opencl.go index d2486a88f66..209049086bd 100644 --- a/lib/harmony/resources/miniopencl/mini_opencl.go +++ b/lib/harmony/resources/miniopencl/mini_opencl.go @@ -17,9 +17,9 @@ type Platform struct { // Obtain the list of platforms available. 
func GetPlatforms() ([]*Platform, error) { - var platformIds [maxPlatforms]C.cl_platform_id + var platformIDs [maxPlatforms]C.cl_platform_id var nPlatforms C.cl_uint - err := C.clGetPlatformIDs(C.cl_uint(maxPlatforms), &platformIds[0], &nPlatforms) + err := C.clGetPlatformIDs(C.cl_uint(maxPlatforms), &platformIDs[0], &nPlatforms) if err == -1001 { // No platforms found return nil, nil } @@ -28,7 +28,7 @@ func GetPlatforms() ([]*Platform, error) { } platforms := make([]*Platform, nPlatforms) for i := 0; i < int(nPlatforms); i++ { - platforms[i] = &Platform{id: platformIds[i]} + platforms[i] = &Platform{id: platformIDs[i]} } return platforms, nil } @@ -46,13 +46,13 @@ type Device struct { } func (p *Platform) GetAllDevices() ([]*Device, error) { - var deviceIds [maxDeviceCount]C.cl_device_id + var deviceIDs [maxDeviceCount]C.cl_device_id var numDevices C.cl_uint - var platformId C.cl_platform_id + var platformID C.cl_platform_id if p != nil { - platformId = p.id + platformID = p.id } - if err := C.clGetDeviceIDs(platformId, C.cl_device_type(DeviceTypeAll), C.cl_uint(maxDeviceCount), &deviceIds[0], &numDevices); err != C.CL_SUCCESS { + if err := C.clGetDeviceIDs(platformID, C.cl_device_type(DeviceTypeAll), C.cl_uint(maxDeviceCount), &deviceIDs[0], &numDevices); err != C.CL_SUCCESS { return nil, toError(err) } if numDevices > maxDeviceCount { @@ -60,7 +60,7 @@ func (p *Platform) GetAllDevices() ([]*Device, error) { } devices := make([]*Device, numDevices) for i := 0; i < int(numDevices); i++ { - devices[i] = &Device{id: deviceIds[i]} + devices[i] = &Device{id: deviceIDs[i]} } return devices, nil } diff --git a/lib/harmony/resources/resources.go b/lib/harmony/resources/resources.go index 33bc80d6fe7..b1010cf57ae 100644 --- a/lib/harmony/resources/resources.go +++ b/lib/harmony/resources/resources.go @@ -20,8 +20,11 @@ import ( var LOOKS_DEAD_TIMEOUT = 10 * time.Minute // Time w/o minute heartbeats type Resources struct { - Cpu int - Gpu float64 + // 
revive:disable-next-line:var-naming + Cpu int + // revive:disable-next-line:var-naming + Gpu float64 + // revive:disable-next-line:var-naming Ram uint64 MachineID int Storage diff --git a/lib/httpreader/httpreader.go b/lib/httpreader/httpreader.go index 62338e76ec8..d2ccc00d6f5 100644 --- a/lib/httpreader/httpreader.go +++ b/lib/httpreader/httpreader.go @@ -1,3 +1,5 @@ +// revive:disable:var-naming + package httpreader import ( diff --git a/lib/parmap/parmap.go b/lib/parmap/parmap.go index dcf0ef3c8e7..7385213fdd7 100644 --- a/lib/parmap/parmap.go +++ b/lib/parmap/parmap.go @@ -53,7 +53,7 @@ func KVMapArr(in interface{}) interface{} { k := it.Key() v := it.Value() - rout.Index(i).Set(reflect.MakeFunc(t, func(args []reflect.Value) (results []reflect.Value) { + rout.Index(i).Set(reflect.MakeFunc(t, func(_ []reflect.Value) (results []reflect.Value) { return []reflect.Value{k, v} })) i++ diff --git a/lib/rpcenc/reader.go b/lib/rpcenc/reader.go index 2dd64473e7e..8efbdbe8c12 100644 --- a/lib/rpcenc/reader.go +++ b/lib/rpcenc/reader.go @@ -1,3 +1,5 @@ +// revive:disable:var-naming + package rpcenc import ( @@ -45,7 +47,7 @@ type ReaderStream struct { var client = func() *http.Client { c := *http.DefaultClient - c.CheckRedirect = func(req *http.Request, via []*http.Request) error { + c.CheckRedirect = func(_ *http.Request, _ []*http.Request) error { return http.ErrUseLastResponse } return &c } diff --git a/lib/rpcenc/reader_test.go b/lib/rpcenc/reader_test.go index 3a554a0ca1e..51bae155c27 100644 --- a/lib/rpcenc/reader_test.go +++ b/lib/rpcenc/reader_test.go @@ -21,19 +21,19 @@ import ( ) type ReaderHandler struct { - readApi func(ctx context.Context, r io.Reader) ([]byte, error) + readAPI func(ctx context.Context, r io.Reader) ([]byte, error) cont chan struct{} subErr error } -func (h *ReaderHandler) ReadAllApi(ctx context.Context, r io.Reader, mustRedir bool) ([]byte, error) { +func (h *ReaderHandler) ReadAllAPI(ctx context.Context, r io.Reader, mustRedir bool) ([]byte, 
error) { if mustRedir { if err := r.(*RpcReader).MustRedirect(); err != nil { return nil, err } } - return h.readApi(ctx, r) + return h.readAPI(ctx, r) } func (h *ReaderHandler) ReadAllWaiting(ctx context.Context, r io.Reader, mustRedir bool) ([]byte, error) { @@ -47,14 +47,14 @@ func (h *ReaderHandler) ReadAllWaiting(ctx context.Context, r io.Reader, mustRed <-h.cont var m []byte - m, h.subErr = h.readApi(ctx, r) + m, h.subErr = h.readAPI(ctx, r) h.cont <- struct{}{} return m, h.subErr } -func (h *ReaderHandler) ReadStartAndApi(ctx context.Context, r io.Reader, mustRedir bool) ([]byte, error) { +func (h *ReaderHandler) ReadStartAndAPI(ctx context.Context, r io.Reader, mustRedir bool) ([]byte, error) { if mustRedir { if err := r.(*RpcReader).MustRedirect(); err != nil { return nil, err @@ -69,14 +69,14 @@ func (h *ReaderHandler) ReadStartAndApi(ctx context.Context, r io.Reader, mustRe return nil, xerrors.Errorf("not one") } - return h.readApi(ctx, r) + return h.readAPI(ctx, r) } -func (h *ReaderHandler) CloseReader(ctx context.Context, r io.Reader) error { +func (h *ReaderHandler) CloseReader(_ context.Context, r io.Reader) error { return r.(io.Closer).Close() } -func (h *ReaderHandler) ReadAll(ctx context.Context, r io.Reader) ([]byte, error) { +func (h *ReaderHandler) ReadAll(_ context.Context, r io.Reader) ([]byte, error) { b, err := io.ReadAll(r) if err != nil { return nil, xerrors.Errorf("readall: %w", err) @@ -85,11 +85,11 @@ func (h *ReaderHandler) ReadAll(ctx context.Context, r io.Reader) ([]byte, error return b, nil } -func (h *ReaderHandler) ReadNullLen(ctx context.Context, r io.Reader) (int64, error) { +func (h *ReaderHandler) ReadNullLen(_ context.Context, r io.Reader) (int64, error) { return r.(*nullreader.NullReader).N, nil } -func (h *ReaderHandler) ReadUrl(ctx context.Context, u string) (string, error) { +func (h *ReaderHandler) ReadUrl(_ context.Context, u string) (string, error) { return u, nil } @@ -178,13 +178,13 @@ func TestReaderRedirect(t 
*testing.T) { } var redirClient struct { - ReadAllApi func(ctx context.Context, r io.Reader, mustRedir bool) ([]byte, error) - ReadStartAndApi func(ctx context.Context, r io.Reader, mustRedir bool) ([]byte, error) + ReadAllAPI func(ctx context.Context, r io.Reader, mustRedir bool) ([]byte, error) + ReadStartAndAPI func(ctx context.Context, r io.Reader, mustRedir bool) ([]byte, error) CloseReader func(ctx context.Context, r io.Reader) error } { - allServerHandler := &ReaderHandler{readApi: allClient.ReadAll} + allServerHandler := &ReaderHandler{readAPI: allClient.ReadAll} readerHandler, readerServerOpt := ReaderParamDecoder() rpcServer := jsonrpc.NewServer(readerServerOpt) rpcServer.Register("ReaderHandler", allServerHandler) @@ -204,17 +204,17 @@ func TestReaderRedirect(t *testing.T) { } // redirect - read, err := redirClient.ReadAllApi(context.TODO(), strings.NewReader("rediracted pooooootato"), true) + read, err := redirClient.ReadAllAPI(context.TODO(), strings.NewReader("rediracted pooooootato"), true) require.NoError(t, err) require.Equal(t, "rediracted pooooootato", string(read), "potatoes weren't equal") // proxy (because we started reading locally) - read, err = redirClient.ReadStartAndApi(context.TODO(), strings.NewReader("rediracted pooooootato"), false) + read, err = redirClient.ReadStartAndAPI(context.TODO(), strings.NewReader("rediracted pooooootato"), false) require.NoError(t, err) require.Equal(t, "ediracted pooooootato", string(read), "otatoes weren't equal") // check mustredir check; proxy (because we started reading locally) - read, err = redirClient.ReadStartAndApi(context.TODO(), strings.NewReader("rediracted pooooootato"), true) + read, err = redirClient.ReadStartAndAPI(context.TODO(), strings.NewReader("rediracted pooooootato"), true) require.Error(t, err) require.Contains(t, err.Error(), ErrMustRedirect.Error()) require.Empty(t, read) @@ -264,7 +264,7 @@ func testReaderRedirectDrop(t *testing.T) { } contCh := make(chan struct{}) - 
allServerHandler := &ReaderHandler{readApi: allClient.ReadAll, cont: contCh} + allServerHandler := &ReaderHandler{readAPI: allClient.ReadAll, cont: contCh} readerHandler, readerServerOpt := ReaderParamDecoder() rpcServer := jsonrpc.NewServer(readerServerOpt) rpcServer.Register("ReaderHandler", allServerHandler) diff --git a/lib/shardedmutex/shardedmutex_test.go b/lib/shardedmutex/shardedmutex_test.go index a7d5f7d1dfe..09d5f111e61 100644 --- a/lib/shardedmutex/shardedmutex_test.go +++ b/lib/shardedmutex/shardedmutex_test.go @@ -10,7 +10,7 @@ import ( "time" ) -func TestLockingDifferentShardsDoesNotBlock(t *testing.T) { +func TestLockingDifferentShardsDoesNotBlock(_ *testing.T) { shards := 16 sm := New(shards) done := make(chan struct{}) diff --git a/lib/ulimit/ulimit_test.go b/lib/ulimit/ulimit_test.go index 071c6013c81..ad20feb1de9 100644 --- a/lib/ulimit/ulimit_test.go +++ b/lib/ulimit/ulimit_test.go @@ -46,8 +46,8 @@ func TestManageInvalidNFds(t *testing.T) { t.Logf("setting ulimit to %d, max %d, cur %d", value, rlimit.Max, rlimit.Cur) - if changed, new, err := ManageFdLimit(); err == nil { - t.Errorf("ManageFdLimit should return an error: changed %t, new: %d", changed, new) + if changed, isNew, err := ManageFdLimit(); err == nil { + t.Errorf("ManageFdLimit should return an error: changed %t, new: %d", changed, isNew) } else if err != nil { flag := strings.Contains(err.Error(), "failed to raise ulimit to LOTUS_FD_MAX") diff --git a/markets/dagstore/miner_api.go b/markets/dagstore/miner_api.go index 8a12097d5f2..5024bfbb273 100644 --- a/markets/dagstore/miner_api.go +++ b/markets/dagstore/miner_api.go @@ -201,7 +201,7 @@ func (m *minerAPI) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (ui return 0, xerrors.Errorf("no storage deals found for piece %s", pieceCid) } - len := pieceInfo.Deals[0].Length + l := pieceInfo.Deals[0].Length - return uint64(len), nil + return uint64(l), nil } diff --git a/markets/dagstore/miner_api_test.go 
b/markets/dagstore/miner_api_test.go index 08135b3a553..d13b098fc7f 100644 --- a/markets/dagstore/miner_api_test.go +++ b/markets/dagstore/miner_api_test.go @@ -129,9 +129,9 @@ func TestLotusAccessorGetUnpaddedCARSize(t *testing.T) { // Check that the data length is correct //stm: @MARKET_DAGSTORE_GET_UNPADDED_CAR_SIZE_001 - len, err := api.GetUnpaddedCARSize(ctx, cid1) + l, err := api.GetUnpaddedCARSize(ctx, cid1) require.NoError(t, err) - require.EqualValues(t, 10, len) + require.EqualValues(t, 10, l) } func TestThrottle(t *testing.T) { diff --git a/markets/dagstore/wrapper.go b/markets/dagstore/wrapper.go index a929ad1fc93..f22d9481fad 100644 --- a/markets/dagstore/wrapper.go +++ b/markets/dagstore/wrapper.go @@ -53,10 +53,10 @@ type Wrapper struct { var _ stores.DAGStoreWrapper = (*Wrapper)(nil) -func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*dagstore.DAGStore, *Wrapper, error) { +func NewDAGStore(cfg config.DAGStoreConfig, minerAPI MinerAPI, _ host.Host) (*dagstore.DAGStore, *Wrapper, error) { // construct the DAG Store. 
registry := mount.NewRegistry() - if err := registry.Register(lotusScheme, mountTemplate(minerApi)); err != nil { + if err := registry.Register(lotusScheme, mountTemplate(minerAPI)); err != nil { return nil, nil, xerrors.Errorf("failed to create registry: %w", err) } @@ -102,7 +102,7 @@ func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*da w := &Wrapper{ cfg: cfg, dagst: dagst, - minerAPI: minerApi, + minerAPI: minerAPI, failureCh: failureCh, gcInterval: time.Duration(cfg.GCInterval), } diff --git a/markets/dagstore/wrapper_test.go b/markets/dagstore/wrapper_test.go index f3b5e1b52c0..dc24a81cfee 100644 --- a/markets/dagstore/wrapper_test.go +++ b/markets/dagstore/wrapper_test.go @@ -166,7 +166,7 @@ type mockDagStore struct { close chan struct{} } -func (m *mockDagStore) GetIterableIndex(key shard.Key) (carindex.IterableIndex, error) { +func (m *mockDagStore) GetIterableIndex(_ shard.Key) (carindex.IterableIndex, error) { return nil, nil } @@ -174,7 +174,7 @@ func (m *mockDagStore) ShardsContainingMultihash(ctx context.Context, h mh.Multi return nil, nil } -func (m *mockDagStore) GetShardKeysForCid(c cid.Cid) ([]shard.Key, error) { +func (m *mockDagStore) GetShardKeysForCid(_ cid.Cid) ([]shard.Key, error) { panic("implement me") } @@ -184,7 +184,7 @@ func (m *mockDagStore) DestroyShard(ctx context.Context, key shard.Key, out chan return nil } -func (m *mockDagStore) GetShardInfo(k shard.Key) (dagstore.ShardInfo, error) { +func (m *mockDagStore) GetShardInfo(_ shard.Key) (dagstore.ShardInfo, error) { panic("implement me") } diff --git a/markets/idxprov/idxprov_test/noop.go b/markets/idxprov/idxprov_test/noop.go index 535c13d2522..e8a71f57836 100644 --- a/markets/idxprov/idxprov_test/noop.go +++ b/markets/idxprov/idxprov_test/noop.go @@ -11,6 +11,6 @@ func NewNoopMeshCreator() *NoopMeshCreator { return &NoopMeshCreator{} } -func (mc NoopMeshCreator) Connect(ctx context.Context) error { +func (mc NoopMeshCreator) Connect(_ context.Context) 
error { return nil } diff --git a/markets/idxprov/mesh.go b/markets/idxprov/mesh.go index e69e213adab..67c8c0caa7c 100644 --- a/markets/idxprov/mesh.go +++ b/markets/idxprov/mesh.go @@ -20,7 +20,7 @@ type MeshCreator interface { } type Libp2pMeshCreator struct { - fullnodeApi v1api.FullNode + fullnodeAPI v1api.FullNode marketsHost host.Host } @@ -29,18 +29,18 @@ func (mc Libp2pMeshCreator) Connect(ctx context.Context) error { // Add the markets host ID to list of daemon's protected peers first, before any attempt to // connect to full node over libp2p. marketsPeerID := mc.marketsHost.ID() - if err := mc.fullnodeApi.NetProtectAdd(ctx, []peer.ID{marketsPeerID}); err != nil { + if err := mc.fullnodeAPI.NetProtectAdd(ctx, []peer.ID{marketsPeerID}); err != nil { return fmt.Errorf("failed to call NetProtectAdd on the full node, err: %w", err) } - faddrs, err := mc.fullnodeApi.NetAddrsListen(ctx) + faddrs, err := mc.fullnodeAPI.NetAddrsListen(ctx) if err != nil { return fmt.Errorf("failed to fetch full node listen addrs, err: %w", err) } // Connect from the full node, ask it to protect the connection and protect the connection on // markets end too. 
Connection is initiated form full node to avoid the need to expose libp2p port on full node - if err := mc.fullnodeApi.NetConnect(ctx, peer.AddrInfo{ + if err := mc.fullnodeAPI.NetConnect(ctx, peer.AddrInfo{ ID: mc.marketsHost.ID(), Addrs: mc.marketsHost.Addrs(), }); err != nil { @@ -54,6 +54,6 @@ func (mc Libp2pMeshCreator) Connect(ctx context.Context) error { return nil } -func NewMeshCreator(fullnodeApi v1api.FullNode, marketsHost host.Host) MeshCreator { - return Libp2pMeshCreator{fullnodeApi, marketsHost} +func NewMeshCreator(fullnodeAPI v1api.FullNode, marketsHost host.Host) MeshCreator { + return Libp2pMeshCreator{fullnodeAPI, marketsHost} } diff --git a/node/config/types.go b/node/config/types.go index c15df320fa9..c46b13c8dc2 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -1,3 +1,5 @@ +// revive:disable:var-naming + package config import ( diff --git a/node/modules/lp2p/discovery.go b/node/modules/lp2p/discovery.go index ca68a7b9cf5..ae96c04029d 100644 --- a/node/modules/lp2p/discovery.go +++ b/node/modules/lp2p/discovery.go @@ -27,6 +27,7 @@ func (dh *discoveryHandler) HandlePeerFound(p peer.AddrInfo) { } } +// revive:disable-next-line:unexported-return func DiscoveryHandler(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host) *discoveryHandler { return &discoveryHandler{ ctx: helpers.LifecycleCtx(mctx, lc), diff --git a/node/modules/lp2p/host.go b/node/modules/lp2p/host.go index 9c140b41ee6..81ad48b79bd 100644 --- a/node/modules/lp2p/host.go +++ b/node/modules/lp2p/host.go @@ -38,7 +38,7 @@ func Peerstore() (peerstore.Peerstore, error) { return pstoremem.NewPeerstore() } -func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, error) { +func Host(_ helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, error) { pkey := params.Peerstore.PrivKey(params.ID) if pkey == nil { return nil, fmt.Errorf("missing private key for node ID: %s", params.ID) } diff --git a/node/modules/lp2p/pubsub.go 
b/node/modules/lp2p/pubsub.go index 2b3efce6c44..408ab17a63e 100644 --- a/node/modules/lp2p/pubsub.go +++ b/node/modules/lp2p/pubsub.go @@ -594,7 +594,7 @@ func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) { msgsRPC := evt.GetRecvRPC().GetMeta().GetMessages() // check if any of the messages we are sending belong to a trackable topic - var validTopic bool = false + var validTopic = false for _, topic := range msgsRPC { if trw.traceMessage(topic.GetTopic()) { validTopic = true @@ -602,7 +602,7 @@ func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) { } } // track if the Iwant / Ihave messages are from a valid Topic - var validIhave bool = false + var validIhave = false for _, msgs := range ihave { if trw.traceMessage(msgs.GetTopic()) { validIhave = true @@ -630,7 +630,7 @@ func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) { msgsRPC := evt.GetSendRPC().GetMeta().GetMessages() // check if any of the messages we are sending belong to a trackable topic - var validTopic bool = false + var validTopic = false for _, topic := range msgsRPC { if trw.traceMessage(topic.GetTopic()) { validTopic = true @@ -638,7 +638,7 @@ func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) { } } // track if the Iwant / Ihave messages are from a valid Topic - var validIhave bool = false + var validIhave = false for _, msgs := range ihave { if trw.traceMessage(msgs.GetTopic()) { validIhave = true diff --git a/node/modules/lp2p/rcmgr.go b/node/modules/lp2p/rcmgr.go index f2b2849863e..f083eb165af 100644 --- a/node/modules/lp2p/rcmgr.go +++ b/node/modules/lp2p/rcmgr.go @@ -190,7 +190,7 @@ func (r rcmgrMetrics) BlockConn(dir network.Direction, usefd bool) { stats.Record(ctx, metrics.RcmgrBlockConn.M(1)) } -func (r rcmgrMetrics) AllowStream(p peer.ID, dir network.Direction) { +func (r rcmgrMetrics) AllowStream(_ peer.ID, dir network.Direction) { ctx := context.Background() if dir == network.DirInbound { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Direction, "inbound")) @@ 
-200,7 +200,7 @@ func (r rcmgrMetrics) AllowStream(p peer.ID, dir network.Direction) { stats.Record(ctx, metrics.RcmgrAllowStream.M(1)) } -func (r rcmgrMetrics) BlockStream(p peer.ID, dir network.Direction) { +func (r rcmgrMetrics) BlockStream(_ peer.ID, dir network.Direction) { ctx := context.Background() if dir == network.DirInbound { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Direction, "inbound")) @@ -210,7 +210,7 @@ func (r rcmgrMetrics) BlockStream(p peer.ID, dir network.Direction) { stats.Record(ctx, metrics.RcmgrBlockStream.M(1)) } -func (r rcmgrMetrics) AllowPeer(p peer.ID) { +func (r rcmgrMetrics) AllowPeer(_ peer.ID) { ctx := context.Background() stats.Record(ctx, metrics.RcmgrAllowPeer.M(1)) } @@ -256,10 +256,10 @@ func (r rcmgrMetrics) BlockServicePeer(svc string, p peer.ID) { stats.Record(ctx, metrics.RcmgrBlockSvcPeer.M(1)) } -func (r rcmgrMetrics) AllowMemory(size int) { +func (r rcmgrMetrics) AllowMemory(_ int) { stats.Record(context.Background(), metrics.RcmgrAllowMem.M(1)) } -func (r rcmgrMetrics) BlockMemory(size int) { +func (r rcmgrMetrics) BlockMemory(_ int) { stats.Record(context.Background(), metrics.RcmgrBlockMem.M(1)) } diff --git a/node/modules/lp2p/routing.go b/node/modules/lp2p/routing.go index 816cbe2eab6..7c0629cc88f 100644 --- a/node/modules/lp2p/routing.go +++ b/node/modules/lp2p/routing.go @@ -25,7 +25,7 @@ type p2pRouterOut struct { Router Router `group:"routers"` } -func BaseRouting(lc fx.Lifecycle, in BaseIpfsRouting) (out p2pRouterOut, dr *dht.IpfsDHT) { +func BaseRouting(lc fx.Lifecycle, in BaseIpfsRouting) (_ p2pRouterOut, dr *dht.IpfsDHT) { if dht, ok := in.(*dht.IpfsDHT); ok { dr = dht diff --git a/node/modules/tracer/elasticsearch_transport.go b/node/modules/tracer/elasticsearch_transport.go index e54e0eba241..5f657b192fd 100644 --- a/node/modules/tracer/elasticsearch_transport.go +++ b/node/modules/tracer/elasticsearch_transport.go @@ -21,17 +21,17 @@ const ( ) func NewElasticSearchTransport(connectionString string, 
elasticsearchIndex string) (TracerTransport, error) { - conUrl, err := url.Parse(connectionString) + conURL, err := url.Parse(connectionString) if err != nil { return nil, err } - username := conUrl.User.Username() - password, _ := conUrl.User.Password() + username := conURL.User.Username() + password, _ := conURL.User.Password() cfg := elasticsearch.Config{ Addresses: []string{ - conUrl.Scheme + "://" + conUrl.Host, + conURL.Scheme + "://" + conURL.Host, }, Username: username, Password: password, @@ -99,7 +99,7 @@ func (est *elasticSearchTransport) Transport(evt TracerTransportEvent) error { esutil.BulkIndexerItem{ Action: "index", Body: bytes.NewReader(jsonEvt), - OnFailure: func(ctx context.Context, item esutil.BulkIndexerItem, res esutil.BulkIndexerResponseItem, err error) { + OnFailure: func(_ context.Context, _ esutil.BulkIndexerItem, res esutil.BulkIndexerResponseItem, err error) { if err != nil { log.Errorf("unable to submit trace - %s", err) } else { diff --git a/node/repo/blockstore_opts.go b/node/repo/blockstore_opts.go index 81f8b9ff416..e294cd6ee5e 100644 --- a/node/repo/blockstore_opts.go +++ b/node/repo/blockstore_opts.go @@ -9,7 +9,7 @@ import ( // BadgerBlockstoreOptions returns the badger options to apply for the provided // domain. 
-func BadgerBlockstoreOptions(domain BlockstoreDomain, path string, readonly bool) (badgerbs.Options, error) { +func BadgerBlockstoreOptions(_ BlockstoreDomain, path string, readonly bool) (badgerbs.Options, error) { opts := badgerbs.DefaultOptions(path) // Due to legacy usage of blockstore.Blockstore, over a datastore, all diff --git a/node/repo/memrepo.go b/node/repo/memrepo.go index 6a4b416e204..61b61d169b4 100644 --- a/node/repo/memrepo.go +++ b/node/repo/memrepo.go @@ -284,11 +284,11 @@ func (lmem *lockedMemRepo) SqlitePath() (string, error) { return sqlitePath, nil } -func (lmem *lockedMemRepo) ListDatastores(ns string) ([]int64, error) { +func (lmem *lockedMemRepo) ListDatastores(_ string) ([]int64, error) { return nil, nil } -func (lmem *lockedMemRepo) DeleteDatastore(ns string) error { +func (lmem *lockedMemRepo) DeleteDatastore(_ string) error { /** poof **/ return nil } diff --git a/storage/paths/db_index.go b/storage/paths/db_index.go index e6def455112..9b95540f26f 100644 --- a/storage/paths/db_index.go +++ b/storage/paths/db_index.go @@ -49,8 +49,8 @@ func NewDBIndex(al *alerting.Alerting, db *harmonydb.DB) *DBIndex { func (dbi *DBIndex) StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) { var sectorEntries []struct { - StorageId string - MinerId sql.NullInt64 + StorageID string + MinerID sql.NullInt64 SectorNum sql.NullInt64 SectorFiletype sql.NullInt32 `db:"sector_filetype"` IsPrimary sql.NullBool @@ -64,23 +64,23 @@ func (dbi *DBIndex) StorageList(ctx context.Context) (map[storiface.ID][]storifa byID := map[storiface.ID]map[abi.SectorID]storiface.SectorFileType{} for _, entry := range sectorEntries { - id := storiface.ID(entry.StorageId) + id := storiface.ID(entry.StorageID) _, ok := byID[id] if !ok { byID[id] = map[abi.SectorID]storiface.SectorFileType{} } // skip sector info for storage paths with no sectors - if !entry.MinerId.Valid { + if !entry.MinerID.Valid { continue } - sectorId := abi.SectorID{ - Miner: 
abi.ActorID(entry.MinerId.Int64), + sectorID := abi.SectorID{ + Miner: abi.ActorID(entry.MinerID.Int64), Number: abi.SectorNumber(entry.SectorNum.Int64), } - byID[id][sectorId] |= storiface.SectorFileType(entry.SectorFiletype.Int32) + byID[id][sectorID] |= storiface.SectorFileType(entry.SectorFiletype.Int32) } out := map[storiface.ID][]storiface.Decl{} @@ -187,16 +187,16 @@ func (dbi *DBIndex) StorageAttach(ctx context.Context, si storiface.StorageInfo, // Single transaction to attach storage which is not present in the DB _, err := dbi.harmonyDB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { var urls sql.NullString - var storageId sql.NullString + var storageID sql.NullString err = tx.QueryRow( - "SELECT storage_id, urls FROM storage_path WHERE storage_id = $1", string(si.ID)).Scan(&storageId, &urls) + "SELECT storage_id, urls FROM storage_path WHERE storage_id = $1", string(si.ID)).Scan(&storageID, &urls) if err != nil && !strings.Contains(err.Error(), "no rows in result set") { return false, xerrors.Errorf("storage attach select fails: %v", err) } // Storage ID entry exists // TODO: Consider using insert into .. on conflict do update set ... 
below - if storageId.Valid { + if storageID.Valid { var currUrls []string if urls.Valid { currUrls = strings.Split(urls.String, URLSeparator) @@ -444,7 +444,7 @@ func (dbi *DBIndex) StorageFindSector(ctx context.Context, s abi.SectorID, ft st storageWithSector := map[string]bool{} type dbRes struct { - StorageId string + StorageID string Count uint64 IsPrimary bool Urls string @@ -500,7 +500,7 @@ func (dbi *DBIndex) StorageFindSector(ctx context.Context, s abi.SectorID, ft st } result = append(result, storiface.SectorStorageInfo{ - ID: storiface.ID(row.StorageId), + ID: storiface.ID(row.StorageID), URLs: urls, BaseURLs: burls, Weight: row.Weight * row.Count, @@ -511,7 +511,7 @@ func (dbi *DBIndex) StorageFindSector(ctx context.Context, s abi.SectorID, ft st DenyTypes: splitString(row.DenyTypes), }) - storageWithSector[row.StorageId] = true + storageWithSector[row.StorageID] = true allowTo := splitString(row.AllowTo) if allowList != nil && len(allowTo) > 0 { @@ -540,7 +540,7 @@ func (dbi *DBIndex) StorageFindSector(ctx context.Context, s abi.SectorID, ft st // 7. 
Storage path is part of the groups which are allowed from the storage paths which already hold the sector var rows []struct { - StorageId string + StorageID string Urls string Weight uint64 CanSeal bool @@ -573,12 +573,12 @@ func (dbi *DBIndex) StorageFindSector(ctx context.Context, s abi.SectorID, ft st } for _, row := range rows { - if ok := storageWithSector[row.StorageId]; ok { + if ok := storageWithSector[row.StorageID]; ok { continue } if !ft.AnyAllowed(splitString(row.AllowTypes), splitString(row.DenyTypes)) { - log.Debugf("not selecting on %s, not allowed by file type filters", row.StorageId) + log.Debugf("not selecting on %s, not allowed by file type filters", row.StorageID) continue } allowMiners := splitString(row.AllowMiners) @@ -588,7 +588,7 @@ func (dbi *DBIndex) StorageFindSector(ctx context.Context, s abi.SectorID, ft st return nil, err } if !proceed { - log.Debugf("not allocating on %s, miner %s %s", row.StorageId, s.Miner.String(), msg) + log.Debugf("not allocating on %s, miner %s %s", row.StorageID, s.Miner.String(), msg) continue } @@ -597,14 +597,14 @@ func (dbi *DBIndex) StorageFindSector(ctx context.Context, s abi.SectorID, ft st allow := false for _, group := range groups { if _, found := allowList[group]; found { - log.Debugf("path %s in allowed group %s", row.StorageId, group) + log.Debugf("path %s in allowed group %s", row.StorageID, group) allow = true break } } if !allow { - log.Debugf("not selecting on %s, not in allowed group, allow %+v; path has %+v", row.StorageId, allowList, groups) + log.Debugf("not selecting on %s, not in allowed group, allow %+v; path has %+v", row.StorageID, allowList, groups) continue } } @@ -621,7 +621,7 @@ func (dbi *DBIndex) StorageFindSector(ctx context.Context, s abi.SectorID, ft st } result = append(result, storiface.SectorStorageInfo{ - ID: storiface.ID(row.StorageId), + ID: storiface.ID(row.StorageID), URLs: urls, BaseURLs: burls, Weight: row.Weight * 0, @@ -693,7 +693,7 @@ func (dbi *DBIndex) 
StorageBestAlloc(ctx context.Context, allocate storiface.Sec } var rows []struct { - StorageId string + StorageID string Urls string Weight uint64 MaxStorage uint64 @@ -748,13 +748,13 @@ func (dbi *DBIndex) StorageBestAlloc(ctx context.Context, allocate storiface.Sec return nil, err } if !proceed { - log.Debugf("not allocating on %s, miner %s %s", row.StorageId, miner.String(), msg) + log.Debugf("not allocating on %s, miner %s %s", row.StorageID, miner.String(), msg) continue } } result = append(result, storiface.StorageInfo{ - ID: storiface.ID(row.StorageId), + ID: storiface.ID(row.StorageID), URLs: splitString(row.Urls), Weight: row.Weight, MaxStorage: row.MaxStorage, @@ -779,7 +779,7 @@ func isLocked(ts sql.NullTime) bool { return ts.Valid && ts.Time.After(time.Now().Add(-LockTimeOut)) } -func (dbi *DBIndex) lock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType, lockUuid uuid.UUID) (bool, error) { +func (dbi *DBIndex) lock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType, lockUUID uuid.UUID) (bool, error) { if read|write == 0 { return false, nil } @@ -845,7 +845,7 @@ func (dbi *DBIndex) lock(ctx context.Context, sector abi.SectorID, read storifac WHERE miner_id=$2 AND sector_num=$3 AND sector_filetype = ANY($4)`, - lockUuid.String(), + lockUUID.String(), sector.Miner, sector.Number, write.AllSet()) @@ -876,7 +876,7 @@ func (dbi *DBIndex) lock(ctx context.Context, sector abi.SectorID, read storifac return true, nil } -func (dbi *DBIndex) unlock(sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType, lockUuid uuid.UUID) (bool, error) { +func (dbi *DBIndex) unlock(sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType, lockUUID uuid.UUID) (bool, error) { ctx := context.Background() if read|write == 0 { @@ -897,7 +897,7 @@ func (dbi *DBIndex) unlock(sector abi.SectorID, read storiface.SectorFileType, w 
AND sector_filetype = ANY($4)`, sector.Miner, sector.Number, - lockUuid.String(), + lockUUID.String(), write.AllSet()) if err != nil { return false, xerrors.Errorf("relinquishing write locks for sector %v fails with err: %v", sector, err) @@ -929,11 +929,11 @@ func (dbi *DBIndex) StorageLock(ctx context.Context, sector abi.SectorID, read s waitTime := 1 // generate uuid for this lock owner - lockUuid := uuid.New() + lockUUID := uuid.New() // retry with exponential backoff and block until lock is acquired for { - locked, err := dbi.lock(ctx, sector, read, write, lockUuid) + locked, err := dbi.lock(ctx, sector, read, write, lockUUID) // if err is not nil and is not because we cannot acquire lock, retry if err != nil && !errors.Is(err, errAlreadyLocked) { retries-- @@ -958,7 +958,7 @@ func (dbi *DBIndex) StorageLock(ctx context.Context, sector abi.SectorID, read s go func() { <-ctx.Done() - _, err := dbi.unlock(sector, read, write, lockUuid) + _, err := dbi.unlock(sector, read, write, lockUUID) if err != nil { log.Errorf("unlocking sector %v for filetypes: read=%d, write=%d, fails with err: %v", sector, read, write, err) } @@ -969,8 +969,8 @@ func (dbi *DBIndex) StorageLock(ctx context.Context, sector abi.SectorID, read s } func (dbi *DBIndex) StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) { - lockUuid := uuid.New() - locked, err := dbi.lock(ctx, sector, read, write, lockUuid) + lockUUID := uuid.New() + locked, err := dbi.lock(ctx, sector, read, write, lockUUID) if err != nil { return false, err } @@ -978,7 +978,7 @@ func (dbi *DBIndex) StorageTryLock(ctx context.Context, sector abi.SectorID, rea if locked { go func() { <-ctx.Done() - _, err := dbi.unlock(sector, read, write, lockUuid) + _, err := dbi.unlock(sector, read, write, lockUUID) if err != nil { log.Errorf("unlocking sector %v for filetypes: read=%d, write=%d, fails with err: %v", sector, read, write, err) } @@ -991,7 
+991,7 @@ func (dbi *DBIndex) StorageTryLock(ctx context.Context, sector abi.SectorID, rea func (dbi *DBIndex) StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) { var rows []struct { - MinerId uint64 + MinerID uint64 SectorNum uint64 SectorFileType int `db:"sector_filetype"` ReadTs sql.NullTime @@ -1013,7 +1013,7 @@ func (dbi *DBIndex) StorageGetLocks(ctx context.Context) (storiface.SectorLocks, sectorLocks := make(map[abi.SectorID]locks) for _, row := range rows { sector := abi.SectorID{ - Miner: abi.ActorID(row.MinerId), + Miner: abi.ActorID(row.MinerID), Number: abi.SectorNumber(row.SectorNum), } diff --git a/storage/paths/local.go b/storage/paths/local.go index 07223ad5317..b6ac1bc9381 100644 --- a/storage/paths/local.go +++ b/storage/paths/local.go @@ -303,7 +303,7 @@ func (st *Local) open(ctx context.Context) error { return nil } -func (st *Local) Redeclare(ctx context.Context, filterId *storiface.ID, dropMissingDecls bool) error { +func (st *Local) Redeclare(ctx context.Context, filterID *storiface.ID, dropMissingDecls bool) error { st.localLk.Lock() defer st.localLk.Unlock() @@ -327,7 +327,7 @@ func (st *Local) Redeclare(ctx context.Context, filterId *storiface.ID, dropMiss log.Errorf("storage path ID changed: %s; %s -> %s", p.local, id, meta.ID) continue } - if filterId != nil && *filterId != id { + if filterID != nil && *filterID != id { continue } @@ -522,9 +522,9 @@ func (st *Local) Reserve(ctx context.Context, sid storiface.SectorRef, ft storif p.reserved += overhead p.reservations[resID] = overhead - old_r := release + oldR := release release = func() { - old_r() + oldR() st.localLk.Lock() defer st.localLk.Unlock() log.Debugw("reserve release", "id", id, "sector", sid, "fileType", fileType, "overhead", overhead, "reserved-before", p.reserved, "reserved-after", p.reserved-overhead) @@ -551,7 +551,7 @@ func DoubleCallWrap(f func()) func() { } } -func (st *Local) AcquireSector(ctx context.Context, sid storiface.SectorRef, existing 
storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode, opts ...storiface.AcquireOption) (storiface.SectorPaths, storiface.SectorPaths, error) { +func (st *Local) AcquireSector(ctx context.Context, sid storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, _ storiface.AcquireMode, _ ...storiface.AcquireOption) (storiface.SectorPaths, storiface.SectorPaths, error) { if existing|allocate != existing^allocate { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector") } @@ -821,16 +821,16 @@ func (st *Local) MoveStorage(ctx context.Context, s storiface.SectorRef, types s } var err error - var dest, destIds storiface.SectorPaths + var dest, destIDs storiface.SectorPaths if settings.Into == nil { - dest, destIds, err = st.AcquireSector(ctx, s, storiface.FTNone, types, storiface.PathStorage, storiface.AcquireMove) + dest, destIDs, err = st.AcquireSector(ctx, s, storiface.FTNone, types, storiface.PathStorage, storiface.AcquireMove) if err != nil { return xerrors.Errorf("acquire dest storage: %w", err) } } else { // destination from settings dest = settings.Into.Paths - destIds = settings.Into.IDs + destIDs = settings.Into.IDs } // note: this calls allocate on types - if data is already in paths of correct type, @@ -846,7 +846,7 @@ func (st *Local) MoveStorage(ctx context.Context, s storiface.SectorRef, types s return xerrors.Errorf("failed to get source storage info: %w", err) } - dst, err := st.index.StorageInfo(ctx, storiface.ID(storiface.PathByType(destIds, fileType))) + dst, err := st.index.StorageInfo(ctx, storiface.ID(storiface.PathByType(destIDs, fileType))) if err != nil { return xerrors.Errorf("failed to get source storage info: %w", err) } @@ -872,8 +872,8 @@ func (st *Local) MoveStorage(ctx context.Context, s storiface.SectorRef, types s return xerrors.Errorf("moving sector %v(%d): %w", 
s, fileType, err) } - if err := st.index.StorageDeclareSector(ctx, storiface.ID(storiface.PathByType(destIds, fileType)), s.ID, fileType, true); err != nil { - return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, storiface.ID(storiface.PathByType(destIds, fileType)), err) + if err := st.index.StorageDeclareSector(ctx, storiface.ID(storiface.PathByType(destIDs, fileType)), s.ID, fileType, true); err != nil { + return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, storiface.ID(storiface.PathByType(destIDs, fileType)), err) } } diff --git a/storage/paths/local_test.go b/storage/paths/local_test.go index 4bc2642dc2d..91e088b0110 100644 --- a/storage/paths/local_test.go +++ b/storage/paths/local_test.go @@ -21,7 +21,7 @@ type TestingLocalStorage struct { c storiface.StorageConfig } -func (t *TestingLocalStorage) DiskUsage(path string) (int64, error) { +func (t *TestingLocalStorage) DiskUsage(_ string) (int64, error) { return 1, nil } @@ -34,7 +34,7 @@ func (t *TestingLocalStorage) SetStorage(f func(*storiface.StorageConfig)) error return nil } -func (t *TestingLocalStorage) Stat(path string) (fsutil.FsStat, error) { +func (t *TestingLocalStorage) Stat(_ string) (fsutil.FsStat, error) { return fsutil.FsStat{ Capacity: pathSize, Available: pathSize, diff --git a/storage/pipeline/piece/piece_info.go b/storage/pipeline/piece/piece_info.go index 48e15751ad0..19dc834667c 100644 --- a/storage/pipeline/piece/piece_info.go +++ b/storage/pipeline/piece/piece_info.go @@ -90,8 +90,8 @@ func (ds *PieceDealInfo) Valid(nv network.Version) error { } type AllocationAPI interface { - StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) - StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + StateGetAllocationForPendingDeal(ctx context.Context, dealID abi.DealID, tsk 
types.TipSetKey) (*verifregtypes.Allocation, error) + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationID verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) } func (ds *PieceDealInfo) GetAllocation(ctx context.Context, aapi AllocationAPI, tsk types.TipSetKey) (*verifregtypes.Allocation, error) { diff --git a/storage/sealer/commitment/commr.go b/storage/sealer/commitment/commr.go index d5f5b0844f2..5fdd0f74a75 100644 --- a/storage/sealer/commitment/commr.go +++ b/storage/sealer/commitment/commr.go @@ -19,11 +19,11 @@ func CommR(commC, commRLast [32]byte) ([32]byte, error) { commRLast[i], commRLast[j] = commRLast[j], commRLast[i] } - input_a := new(big.Int) - input_a.SetBytes(commC[:]) - input_b := new(big.Int) - input_b.SetBytes(commRLast[:]) - input := []*big.Int{input_a, input_b} + inputA := new(big.Int) + inputA.SetBytes(commC[:]) + inputB := new(big.Int) + inputB.SetBytes(commRLast[:]) + input := []*big.Int{inputA, inputB} cons, err := poseidon.GenPoseidonConstants(3) if err != nil { diff --git a/storage/sealer/ffiwrapper/basicfs/fs.go b/storage/sealer/ffiwrapper/basicfs/fs.go index 47c7f526e32..cc811dcc989 100644 --- a/storage/sealer/ffiwrapper/basicfs/fs.go +++ b/storage/sealer/ffiwrapper/basicfs/fs.go @@ -23,7 +23,7 @@ type Provider struct { waitSector map[sectorFile]chan struct{} } -func (b *Provider) AcquireSector(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) { +func (b *Provider) AcquireSector(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, _ storiface.PathType) (storiface.SectorPaths, func(), error) { if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint return storiface.SectorPaths{}, nil, err } diff --git 
a/storage/sealer/fr32/fr32_test.go b/storage/sealer/fr32/fr32_test.go index d5f094df492..2f0144bf330 100644 --- a/storage/sealer/fr32/fr32_test.go +++ b/storage/sealer/fr32/fr32_test.go @@ -88,7 +88,7 @@ func TestPadChunkRandEqFFI(t *testing.T) { } func TestRoundtrip(t *testing.T) { - testByteChunk := func(b byte) func(*testing.T) { + testByteChunk := func(_ byte) func(*testing.T) { return func(t *testing.T) { var buf [128]byte input := bytes.Repeat([]byte{0x01}, 127) diff --git a/storage/sealer/manager.go b/storage/sealer/manager.go index 00514c79eda..6d858f2c9d7 100644 --- a/storage/sealer/manager.go +++ b/storage/sealer/manager.go @@ -1315,8 +1315,8 @@ func (m *Manager) SchedDiag(ctx context.Context, doSched bool) (interface{}, err return i, nil } -func (m *Manager) RemoveSchedRequest(ctx context.Context, schedId uuid.UUID) error { - return m.sched.RemoveRequest(ctx, schedId) +func (m *Manager) RemoveSchedRequest(ctx context.Context, schedID uuid.UUID) error { + return m.sched.RemoveRequest(ctx, schedID) } func (m *Manager) Close(ctx context.Context) error { diff --git a/storage/sealer/manager_post.go b/storage/sealer/manager_post.go index 27a71ef8caf..a1020d4b6c8 100644 --- a/storage/sealer/manager_post.go +++ b/storage/sealer/manager_post.go @@ -108,7 +108,7 @@ func dedupeSectorInfo(sectorInfo []proof.ExtendedSectorInfo) []proof.ExtendedSec } func (m *Manager) generateWindowPoSt(ctx context.Context, minerID abi.ActorID, ppt abi.RegisteredPoStProof, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) { - var retErr error = nil + var retErr error randomness[31] &= 0x3f out := make([]proof.PoStProof, 0) diff --git a/storage/sealer/manager_test.go b/storage/sealer/manager_test.go index bd03cd09714..17fd6b09831 100644 --- a/storage/sealer/manager_test.go +++ b/storage/sealer/manager_test.go @@ -44,7 +44,7 @@ func init() { type testStorage storiface.StorageConfig -func (t testStorage) DiskUsage(path 
string) (int64, error) { +func (t testStorage) DiskUsage(_ string) (int64, error) { return 1, nil // close enough } @@ -798,7 +798,7 @@ func TestResUse(t *testing.T) { return &testExec{apch: arch}, nil }, WorkerConfig{ TaskTypes: localTasks, - }, func(s string) (string, bool) { + }, func(_ string) (string, bool) { return "", false }, stor, lstor, idx, m, statestore.New(wds)) diff --git a/storage/sealer/mock/mock.go b/storage/sealer/mock/mock.go index e33be847715..1efeba79540 100644 --- a/storage/sealer/mock/mock.go +++ b/storage/sealer/mock/mock.go @@ -222,7 +222,7 @@ func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid storiface.SectorRe }, nil } -func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid storiface.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storiface.SectorCids) (output storiface.Commit1Out, err error) { +func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid storiface.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, _ []abi.PieceInfo, cids storiface.SectorCids) (output storiface.Commit1Out, err error) { mgr.lk.Lock() ss, ok := mgr.sectors[sid.ID] mgr.lk.Unlock() @@ -667,7 +667,7 @@ func (m mockVerifProver) VerifyAggregateSeals(aggregate prooftypes.AggregateSeal return ok, nil } -func (m mockVerifProver) VerifyReplicaUpdate(update prooftypes.ReplicaUpdateInfo) (bool, error) { +func (m mockVerifProver) VerifyReplicaUpdate(_ prooftypes.ReplicaUpdateInfo) (bool, error) { return true, nil } diff --git a/storage/sealer/sched.go b/storage/sealer/sched.go index c0ac11bcf9d..8746e651f9d 100644 --- a/storage/sealer/sched.go +++ b/storage/sealer/sched.go @@ -90,7 +90,7 @@ type Scheduler struct { } type WorkerHandle struct { - workerRpc Worker + workerRPC Worker Info storiface.WorkerInfo @@ -132,7 +132,8 @@ type WorkerRequest struct { TaskType sealtasks.TaskType Priority int // larger values more important Sel WorkerSelector - SchedId uuid.UUID + // 
revive:disable-next-line:var-naming + SchedId uuid.UUID prepare PrepareAction work WorkerAction @@ -441,12 +442,12 @@ func (sh *Scheduler) removeRequest(rmrequest *rmRequest) { rmrequest.res <- xerrors.New("No request with provided details found") } -func (sh *Scheduler) RemoveRequest(ctx context.Context, schedId uuid.UUID) error { +func (sh *Scheduler) RemoveRequest(ctx context.Context, schedID uuid.UUID) error { ret := make(chan error, 1) select { case sh.rmRequest <- &rmRequest{ - id: schedId, + id: schedID, res: ret, }: case <-sh.closing: diff --git a/storage/sealer/sched_post.go b/storage/sealer/sched_post.go index c6bd8182976..f8a3bec9e6c 100644 --- a/storage/sealer/sched_post.go +++ b/storage/sealer/sched_post.go @@ -56,7 +56,7 @@ func (ps *poStScheduler) MaybeAddWorker(wid storiface.WorkerID, tasks map[sealta func (ps *poStScheduler) delWorker(wid storiface.WorkerID) *WorkerHandle { ps.lk.Lock() defer ps.lk.Unlock() - var w *WorkerHandle = nil + var w *WorkerHandle if wh, ok := ps.workers[wid]; ok { w = wh delete(ps.workers, wid) @@ -115,7 +115,7 @@ func (ps *poStScheduler) Schedule(ctx context.Context, primary bool, spt abi.Reg ps.lk.Unlock() defer ps.lk.Lock() - return work(ctx, worker.workerRpc) + return work(ctx, worker.workerRPC) }) if err == nil { return nil @@ -201,7 +201,7 @@ func (ps *poStScheduler) watch(wid storiface.WorkerID, worker *WorkerHandle) { } sctx, scancel := context.WithTimeout(ctx, paths.HeartbeatInterval/2) - curSes, err := worker.workerRpc.Session(sctx) + curSes, err := worker.workerRPC.Session(sctx) scancel() if err != nil { // Likely temporary error diff --git a/storage/sealer/sched_test.go b/storage/sealer/sched_test.go index a991ff3fde2..9b2fafec9c7 100644 --- a/storage/sealer/sched_test.go +++ b/storage/sealer/sched_test.go @@ -324,7 +324,7 @@ func TestSched(t *testing.T) { taskStarted := func(name string) task { _, _, l, _ := runtime.Caller(1) _, _, l2, _ := runtime.Caller(2) - return func(t *testing.T, sched *Scheduler, index 
*paths.MemIndex, rm *runMeta) { + return func(t *testing.T, _ *Scheduler, index *paths.MemIndex, rm *runMeta) { select { case rm.done[name] <- struct{}{}: case <-ctx.Done(): @@ -638,7 +638,7 @@ func BenchmarkTrySched(b *testing.B) { sched, err := newScheduler(ctx, "") require.NoError(b, err) sched.Workers[storiface.WorkerID{}] = &WorkerHandle{ - workerRpc: &tw{Worker: &whnd}, + workerRPC: &tw{Worker: &whnd}, Info: storiface.WorkerInfo{ Hostname: "t", Resources: decentWorkerResources, diff --git a/storage/sealer/sched_worker.go b/storage/sealer/sched_worker.go index b482be589c5..9356345c321 100644 --- a/storage/sealer/sched_worker.go +++ b/storage/sealer/sched_worker.go @@ -33,7 +33,7 @@ func newWorkerHandle(ctx context.Context, w Worker) (*WorkerHandle, error) { tc := newTaskCounter() worker := &WorkerHandle{ - workerRpc: w, + workerRPC: w, Info: info, preparing: NewActiveResources(tc), @@ -203,7 +203,7 @@ func (sw *schedWorker) disable(ctx context.Context) error { func (sw *schedWorker) checkSession(ctx context.Context) bool { for { sctx, scancel := context.WithTimeout(ctx, paths.HeartbeatInterval/2) - curSes, err := sw.worker.workerRpc.Session(sctx) + curSes, err := sw.worker.workerRPC.Session(sctx) scancel() if err != nil { // Likely temporary error @@ -462,7 +462,7 @@ func (sw *schedWorker) startProcessingTask(req *WorkerRequest) error { go func() { // first run the prepare step (e.g. 
fetching sector data from other worker) - tw := sh.workTracker.worker(sw.wid, w.Info, w.workerRpc) + tw := sh.workTracker.worker(sw.wid, w.Info, w.workerRPC) tw.start() err := req.prepare.Action(req.Ctx, tw) w.lk.Lock() @@ -488,7 +488,7 @@ func (sw *schedWorker) startProcessingTask(req *WorkerRequest) error { return } - tw = sh.workTracker.worker(sw.wid, w.Info, w.workerRpc) + tw = sh.workTracker.worker(sw.wid, w.Info, w.workerRPC) // start tracking work first early in case we need to wait for resources werr := make(chan error, 1) @@ -550,7 +550,7 @@ func (sw *schedWorker) startProcessingReadyTask(req *WorkerRequest) error { go func() { // Do the work! - tw := sh.workTracker.worker(sw.wid, w.Info, w.workerRpc) + tw := sh.workTracker.worker(sw.wid, w.Info, w.workerRPC) tw.start() err := req.work(req.Ctx, tw) diff --git a/storage/sealer/sched_worker_cache.go b/storage/sealer/sched_worker_cache.go index a17bf567464..0c4d9786c64 100644 --- a/storage/sealer/sched_worker_cache.go +++ b/storage/sealer/sched_worker_cache.go @@ -29,8 +29,8 @@ func (s *schedWorkerCache) Get(id storiface.WorkerID) (*cachedSchedWorker, bool) whnd := s.Workers[id] s.cached[id] = &cachedSchedWorker{ - tt: lazy.MakeLazyCtx(whnd.workerRpc.TaskTypes), - paths: lazy.MakeLazyCtx(whnd.workerRpc.Paths), + tt: lazy.MakeLazyCtx(whnd.workerRPC.TaskTypes), + paths: lazy.MakeLazyCtx(whnd.workerRPC.Paths), utilization: lazy.MakeLazy(func() (float64, error) { return whnd.Utilization(), nil }), diff --git a/storage/sealer/selector_move.go b/storage/sealer/selector_move.go index 3f07a75deff..386f66d1e96 100644 --- a/storage/sealer/selector_move.go +++ b/storage/sealer/selector_move.go @@ -21,7 +21,7 @@ type moveSelector struct { allowRemote bool } -func newMoveSelector(index paths.SectorIndex, sector abi.SectorID, alloc storiface.SectorFileType, destPtype storiface.PathType, miner abi.ActorID, allowRemote bool) *moveSelector { +func newMoveSelector(index paths.SectorIndex, sector abi.SectorID, alloc 
storiface.SectorFileType, destPtype storiface.PathType, _ abi.ActorID, allowRemote bool) *moveSelector { return &moveSelector{ index: index, sector: sector, diff --git a/storage/sealer/selector_task.go b/storage/sealer/selector_task.go index 805fcbbd03b..c618870a07f 100644 --- a/storage/sealer/selector_task.go +++ b/storage/sealer/selector_task.go @@ -19,7 +19,7 @@ func newTaskSelector() *taskSelector { return &taskSelector{} } -func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd SchedWorker) (bool, bool, error) { +func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, _ abi.RegisteredSealProof, whnd SchedWorker) (bool, bool, error) { tasks, err := whnd.TaskTypes(ctx) if err != nil { return false, false, xerrors.Errorf("getting supported worker task types: %w", err) diff --git a/storage/sealer/stats.go b/storage/sealer/stats.go index 90b6287c360..6fd565226bb 100644 --- a/storage/sealer/stats.go +++ b/storage/sealer/stats.go @@ -21,7 +21,7 @@ func (m *Manager) WorkerStats(ctx context.Context) map[uuid.UUID]storiface.Worke ctx, cancel := context.WithTimeout(ctx, 3*time.Second) defer cancel() - tt, err := handle.workerRpc.TaskTypes(ctx) + tt, err := handle.workerRPC.TaskTypes(ctx) var taskList []sealtasks.TaskType if err != nil { log.Warnw("getting worker task types in WorkerStats", "error", err) diff --git a/storage/sealer/storiface/resources_test.go b/storage/sealer/storiface/resources_test.go index e360b8d1430..4a7f0f1ec57 100644 --- a/storage/sealer/storiface/resources_test.go +++ b/storage/sealer/storiface/resources_test.go @@ -27,7 +27,7 @@ func TestListResourceVars(t *testing.T) { } func TestListResourceOverride(t *testing.T) { - rt, err := ParseResourceEnv(func(key, def string) (string, bool) { + rt, err := ParseResourceEnv(func(key, _ string) (string, bool) { if key == "UNS_2K_MAX_PARALLELISM" { return "2", true } @@ -51,7 +51,7 @@ func TestListResourceOverride(t *testing.T) { } func 
TestListResourceSDRMulticoreOverride(t *testing.T) { - rt, err := ParseResourceEnv(func(key, def string) (string, bool) { + rt, err := ParseResourceEnv(func(key, _ string) (string, bool) { if key == "FIL_PROOFS_USE_MULTICORE_SDR" { return "1", true } @@ -63,7 +63,7 @@ func TestListResourceSDRMulticoreOverride(t *testing.T) { require.Equal(t, 4, rt[sealtasks.TTPreCommit1][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) require.Equal(t, 4, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) - rt, err = ParseResourceEnv(func(key, def string) (string, bool) { + rt, err = ParseResourceEnv(func(key, _ string) (string, bool) { if key == "FIL_PROOFS_USE_MULTICORE_SDR" { return "1", true } @@ -80,7 +80,7 @@ func TestListResourceSDRMulticoreOverride(t *testing.T) { } func TestUnsizedSetAll(t *testing.T) { - rt, err := ParseResourceEnv(func(key, def string) (string, bool) { + rt, err := ParseResourceEnv(func(key, _ string) (string, bool) { if key == "UNS_MAX_PARALLELISM" { return "2", true } @@ -98,7 +98,7 @@ func TestUnsizedSetAll(t *testing.T) { } func TestUnsizedNotPreferred(t *testing.T) { - rt, err := ParseResourceEnv(func(key, def string) (string, bool) { + rt, err := ParseResourceEnv(func(key, _ string) (string, bool) { if key == "DC_MAX_PARALLELISM" { return "2", true } diff --git a/storage/sealer/storiface/storage.go b/storage/sealer/storiface/storage.go index 143c3b5d560..85b61fe22ea 100644 --- a/storage/sealer/storiface/storage.go +++ b/storage/sealer/storiface/storage.go @@ -1,3 +1,5 @@ +// revive:disable var-naming + package storiface import ( diff --git a/storage/sealer/testworker_test.go b/storage/sealer/testworker_test.go index 6ea57b78d1f..13cbbb564b2 100644 --- a/storage/sealer/testworker_test.go +++ b/storage/sealer/testworker_test.go @@ -113,7 +113,7 @@ func (t *testWorker) SealPreCommit1(ctx context.Context, sector storiface.Sector }) } -func (t *testWorker) Fetch(ctx context.Context, sector 
storiface.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { +func (t *testWorker) Fetch(ctx context.Context, sector storiface.SectorRef, _ storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { return t.asyncCall(sector, func(ci storiface.CallID) { if err := t.ret.ReturnFetch(ctx, ci, nil); err != nil { log.Error(err) diff --git a/storage/sealer/worker_local.go b/storage/sealer/worker_local.go index 2a49447a271..66ab36bccb2 100644 --- a/storage/sealer/worker_local.go +++ b/storage/sealer/worker_local.go @@ -357,7 +357,7 @@ func (l *LocalWorker) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSi return storiface.UndefCall, err } - return l.asyncCall(ctx, storiface.NoSectorRef, DataCid, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, storiface.NoSectorRef, DataCid, func(ctx context.Context, _ storiface.CallID) (interface{}, error) { return sb.DataCid(ctx, pieceSize, pieceData) }) } @@ -368,7 +368,7 @@ func (l *LocalWorker) AddPiece(ctx context.Context, sector storiface.SectorRef, return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, AddPiece, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, AddPiece, func(ctx context.Context, _ storiface.CallID) (interface{}, error) { return sb.AddPiece(ctx, sector, epcs, sz, r) }) } @@ -827,7 +827,7 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err) } - resEnv, err := storiface.ParseResourceEnv(func(key, def string) (string, bool) { + resEnv, err := storiface.ParseResourceEnv(func(key, _ string) (string, bool) { return l.envLookup(key) }) if err != nil { diff --git a/tools/stats/ipldstore/ipldstore.go b/tools/stats/ipldstore/ipldstore.go index 2b96fb0cd13..51d87b4744b 100644 --- 
a/tools/stats/ipldstore/ipldstore.go +++ b/tools/stats/ipldstore/ipldstore.go @@ -1,3 +1,5 @@ +// revive:disable:var-naming + package ipldstore import ( @@ -15,16 +17,16 @@ import ( type ApiIpldStore struct { ctx context.Context - api apiIpldStoreApi + api apiIpldStoreAPI cache *lru.TwoQueueCache[cid.Cid, []byte] cacheSize int } -type apiIpldStoreApi interface { +type apiIpldStoreAPI interface { ChainReadObj(context.Context, cid.Cid) ([]byte, error) } -func NewApiIpldStore(ctx context.Context, api apiIpldStoreApi, cacheSize int) (*ApiIpldStore, error) { +func NewApiIpldStore(ctx context.Context, api apiIpldStoreAPI, cacheSize int) (*ApiIpldStore, error) { store := &ApiIpldStore{ ctx: ctx, api: api,