diff --git a/cli/defradb/cmd/blocks_get.go b/cli/defradb/cmd/blocks_get.go
index 2815c403fb..63aab57ef7 100644
--- a/cli/defradb/cmd/blocks_get.go
+++ b/cli/defradb/cmd/blocks_get.go
@@ -45,26 +45,26 @@ var getCmd = &cobra.Command{
 		endpoint, err := httpapi.JoinPaths(dbaddr, httpapi.BlocksPath, cid)
 		if err != nil {
-			log.ErrorE(ctx, "join paths failed", err)
+			log.ErrorE(ctx, "Join paths failed", err)
 			return
 		}

 		res, err := http.Get(endpoint.String())
 		if err != nil {
-			log.ErrorE(ctx, "request failed", err)
+			log.ErrorE(ctx, "Request failed", err)
 			return
 		}

 		defer func() {
 			err = res.Body.Close()
 			if err != nil {
-				log.ErrorE(ctx, "response body closing failed", err)
+				log.ErrorE(ctx, "Response body closing failed", err)
 			}
 		}()

 		buf, err := io.ReadAll(res.Body)
 		if err != nil {
-			log.ErrorE(ctx, "request failed", err)
+			log.ErrorE(ctx, "Request failed", err)
 			return
 		}
 		log.Debug(ctx, "", logging.NewKV("Block", string(buf)))
diff --git a/cli/defradb/cmd/dump.go b/cli/defradb/cmd/dump.go
index 8d27420e1d..44956ebd9a 100644
--- a/cli/defradb/cmd/dump.go
+++ b/cli/defradb/cmd/dump.go
@@ -41,26 +41,26 @@ var dumpCmd = &cobra.Command{
 		endpoint, err := httpapi.JoinPaths(dbaddr, httpapi.DumpPath)
 		if err != nil {
-			log.ErrorE(ctx, "join paths failed", err)
+			log.ErrorE(ctx, "Join paths failed", err)
 			return
 		}

 		res, err := http.Get(endpoint.String())
 		if err != nil {
-			log.ErrorE(ctx, "request failed", err)
+			log.ErrorE(ctx, "Request failed", err)
 			return
 		}

 		defer func() {
 			err = res.Body.Close()
 			if err != nil {
-				log.ErrorE(ctx, "response body closing failed", err)
+				log.ErrorE(ctx, "Response body closing failed", err)
 			}
 		}()

 		buf, err := io.ReadAll(res.Body)
 		if err != nil {
-			log.ErrorE(ctx, "request failed", err)
+			log.ErrorE(ctx, "Request failed", err)
 			return
 		}
 		if string(buf) == "ok" {
diff --git a/cli/defradb/cmd/ping.go b/cli/defradb/cmd/ping.go
index 4a6fa1c659..e44fbe138f 100644
--- a/cli/defradb/cmd/ping.go
+++ b/cli/defradb/cmd/ping.go
@@ -43,26 +43,26 @@ var pingCmd = &cobra.Command{
 		endpoint, err := httpapi.JoinPaths(dbaddr, httpapi.PingPath)
 		if err != nil {
-			log.ErrorE(ctx, "join paths failed", err)
+			log.ErrorE(ctx, "Join paths failed", err)
 			return
 		}

 		res, err := http.Get(endpoint.String())
 		if err != nil {
-			log.ErrorE(ctx, "request failed", err)
+			log.ErrorE(ctx, "Request failed", err)
 			return
 		}

 		defer func() {
 			err = res.Body.Close()
 			if err != nil {
-				log.ErrorE(ctx, "response body closing failed", err)
+				log.ErrorE(ctx, "Response body closing failed", err)
 			}
 		}()

 		buf, err := io.ReadAll(res.Body)
 		if err != nil {
-			log.ErrorE(ctx, "request failed", err)
+			log.ErrorE(ctx, "Request failed", err)
 			return
 		}
 		if string(buf) == "pong" {
diff --git a/cli/defradb/cmd/query.go b/cli/defradb/cmd/query.go
index e30d14db41..27198b0ed7 100644
--- a/cli/defradb/cmd/query.go
+++ b/cli/defradb/cmd/query.go
@@ -53,17 +53,17 @@ the additional documentation found at: https://hackmd.io/@source/BksQY6Qfw.
 		}
 		if len(args) != 1 {
-			log.Fatal(ctx, "needs a single query argument")
+			log.Fatal(ctx, "Needs a single query argument")
 		}
 		query := args[0]
 		if query == "" {
-			log.Error(ctx, "missing query")
+			log.Error(ctx, "Missing query")
 			return
 		}
 		endpoint, err := httpapi.JoinPaths(dbaddr, httpapi.GraphQLPath)
 		if err != nil {
-			log.ErrorE(ctx, "join paths failed", err)
+			log.ErrorE(ctx, "Join paths failed", err)
 			return
 		}

@@ -73,20 +73,20 @@ the additional documentation found at: https://hackmd.io/@source/BksQY6Qfw.
 		res, err := http.Get(endpoint.String())
 		if err != nil {
-			log.ErrorE(ctx, "request failed", err)
+			log.ErrorE(ctx, "Request failed", err)
 			return
 		}

 		defer func() {
 			err = res.Body.Close()
 			if err != nil {
-				log.ErrorE(ctx, "response body closing failed: ", err)
+				log.ErrorE(ctx, "Response body closing failed", err)
 			}
 		}()

 		buf, err := io.ReadAll(res.Body)
 		if err != nil {
-			log.ErrorE(ctx, "request failed", err)
+			log.ErrorE(ctx, "Request failed", err)
 			return
 		}
diff --git a/cli/defradb/cmd/schema_add.go b/cli/defradb/cmd/schema_add.go
index ef1a348478..c99adc602b 100644
--- a/cli/defradb/cmd/schema_add.go
+++ b/cli/defradb/cmd/schema_add.go
@@ -58,7 +58,7 @@ var addCmd = &cobra.Command{
 		}
 		endpoint, err := httpapi.JoinPaths(dbaddr, httpapi.SchemaLoadPath)
 		if err != nil {
-			log.ErrorE(ctx, "join paths failed", err)
+			log.ErrorE(ctx, "Join paths failed", err)
 			return
 		}

@@ -68,7 +68,7 @@ var addCmd = &cobra.Command{
 		defer func() {
 			err = res.Body.Close()
 			if err != nil {
-				log.ErrorE(ctx, "response body closing failed", err)
+				log.ErrorE(ctx, "Response body closing failed", err)
 			}
 		}()

diff --git a/cli/defradb/cmd/serverdump.go b/cli/defradb/cmd/serverdump.go
index fa05bedafe..9fde2e449a 100644
--- a/cli/defradb/cmd/serverdump.go
+++ b/cli/defradb/cmd/serverdump.go
@@ -43,7 +43,7 @@ var srvDumpCmd = &cobra.Command{
 		if config.Database.Store == databaseStoreName {
 			log.Info(
 				ctx,
-				"opening badger store",
+				"Opening badger store",
 				logging.NewKV("Path", config.Database.Badger.Path),
 			)
 			rootstore, err = badgerds.NewDatastore(
@@ -51,18 +51,18 @@ var srvDumpCmd = &cobra.Command{
 				config.Database.Badger.Options,
 			)
 		} else {
-			log.Fatal(ctx, "Server side dump is only supported for the Badger datastore")
+			log.Fatal(ctx, "Server-side dump is only supported for the Badger datastore")
 		}
 		if err != nil {
-			log.FatalE(ctx, "Failed to initiate datastore:", err)
+			log.FatalE(ctx, "Failed to initialize datastore", err)
 		}

 		db, err := db.NewDB(ctx, rootstore)
 		if err != nil {
-			log.FatalE(ctx, "Failed to initiate database:", err)
+			log.FatalE(ctx, "Failed to initialize database", err)
 		}

-		log.Info(ctx, "Dumping DB state:")
+		log.Info(ctx, "Dumping DB state...")
 		db.PrintDump(ctx)
 	},
 }
diff --git a/cli/defradb/cmd/start.go b/cli/defradb/cmd/start.go
index 6feb20a5f0..942d350d76 100644
--- a/cli/defradb/cmd/start.go
+++ b/cli/defradb/cmd/start.go
@@ -68,7 +68,7 @@ var startCmd = &cobra.Command{
 		if config.Database.Store == "badger" {
 			log.Info(
 				ctx,
-				"opening badger store",
+				"Opening badger store",
 				logging.NewKV("Path", config.Database.Badger.Path),
 			)
 			rootstore, err = badgerds.NewDatastore(
@@ -76,7 +76,7 @@ var startCmd = &cobra.Command{
 				config.Database.Badger.Options,
 			)
 		} else if config.Database.Store == "memory" {
-			log.Info(ctx, "building new memory store")
+			log.Info(ctx, "Building new memory store")
 			opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
 			rootstore, err = badgerds.NewDatastore("", &opts)
 		}
@@ -102,7 +102,7 @@ var startCmd = &cobra.Command{
 		// init the p2p node
 		var n *node.Node
 		if !config.Net.P2PDisabled {
-			log.Info(ctx, "Starting P2P node", logging.NewKV("tcp address", config.Net.TCPAddress))
+			log.Info(ctx, "Starting P2P node", logging.NewKV("TCP address", config.Net.TCPAddress))
 			n, err = node.NewNode(
 				ctx,
 				db,
@@ -111,7 +111,7 @@ var startCmd = &cobra.Command{
 				node.ListenP2PAddrStrings(config.Net.P2PAddress),
 				node.WithPubSub(true))
 			if err != nil {
-				log.ErrorE(ctx, "Failed to start p2p node", err)
+				log.ErrorE(ctx, "Failed to start P2P node", err)
 				n.Close() //nolint
 				db.Close(ctx)
 				os.Exit(1)
@@ -124,12 +124,12 @@ var startCmd = &cobra.Command{
 			if err != nil {
 				log.ErrorE(ctx, "Failed to parse bootstrap peers", err)
 			}
-			log.Debug(ctx, "Bootstraping with peers", logging.NewKV("Addresses", addrs))
+			log.Debug(ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs))
 			n.Boostrap(addrs)
 		}

 		if err := n.Start(); err != nil {
-			log.ErrorE(ctx, "Failed to start p2p listeners", err)
+			log.ErrorE(ctx, "Failed to start P2P listeners", err)
 			n.Close() //nolint
 			db.Close(ctx)
 			os.Exit(1)
@@ -160,11 +160,11 @@ var startCmd = &cobra.Command{
 			netService := netapi.NewService(n.Peer)

 			go func() {
-				log.Info(ctx, "Started gRPC server", logging.NewKV("Address", addr))
+				log.Info(ctx, "Started RPC server", logging.NewKV("Address", addr))
 				netpb.RegisterServiceServer(server, netService)
 				if err := server.Serve(tcplistener); err != nil && !errors.Is(err, grpc.ErrServerStopped) {
-					log.FatalE(ctx, "serve error", err)
+					log.FatalE(ctx, "Server error", err)
 				}
 			}()
 		}
@@ -194,7 +194,7 @@ var startCmd = &cobra.Command{
 		// wait for shutdown signal
 		<-signalCh
-		log.Info(ctx, "Received interrupt; closing db")
+		log.Info(ctx, "Received interrupt; closing database...")
 		if n != nil {
 			n.Close() //nolint
 		}
diff --git a/datastore/badger/v3/datastore.go b/datastore/badger/v3/datastore.go
index 581486cec0..3a8ebd8d30 100644
--- a/datastore/badger/v3/datastore.go
+++ b/datastore/badger/v3/datastore.go
@@ -199,7 +199,7 @@ func (d *Datastore) periodicGC() {
 			case ErrClosed:
 				return
 			default:
-				log.Errorf("error during a GC cycle: %s", err)
+				log.Errorf("Error during a GC cycle: %s", err)
 				// Not much we can do on a random error but log it and continue.
 				gcTimeout.Reset(d.gcInterval)
 			}
@@ -431,7 +431,7 @@ func (d *Datastore) Batch(ctx context.Context) (ds.Batch, error) {
 	// batch is abandoned.
 	runtime.SetFinalizer(b, func(b *batch) {
 		b.cancel()
-		log.Error("batch not committed or canceled")
+		log.Error("Batch not committed or canceled")
 	})

 	return b, nil
diff --git a/datastore/blockstore.go b/datastore/blockstore.go
index f44dbd4865..4f85bb851e 100644
--- a/datastore/blockstore.go
+++ b/datastore/blockstore.go
@@ -70,7 +70,7 @@ func (bs *bstore) HashOnRead(enabled bool) {

 func (bs *bstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
 	if !k.Defined() {
-		log.Error(ctx, "Undefined cid in blockstore")
+		log.Error(ctx, "Undefined CID in blockstore")
 		return nil, ipld.ErrNotFound{Cid: k}
 	}
 	bdata, err := bs.store.Get(ctx, dshelp.MultihashToDsKey(k.Hash()))
diff --git a/db/collection.go b/db/collection.go
index 6debf674f6..39a1852032 100644
--- a/db/collection.go
+++ b/db/collection.go
@@ -171,7 +171,7 @@ func (db *db) CreateCollection(
 		ctx,
 		"Created collection",
 		logging.NewKV("Name", col.Name()),
-		logging.NewKV("Id", col.SchemaID),
+		logging.NewKV("ID", col.SchemaID),
 	)
 	return col, err
 }
@@ -213,7 +213,7 @@ func (db *db) GetCollectionByName(ctx context.Context, name string) (client.Coll
 		ctx,
 		"Retrieved collection",
 		logging.NewKV("Name", desc.Name),
-		logging.NewKV("Id", sid),
+		logging.NewKV("ID", sid),
 	)

 	return &collection{
diff --git a/db/db.go b/db/db.go
index d5f43fce48..952e4876a5 100644
--- a/db/db.go
+++ b/db/db.go
@@ -80,18 +80,18 @@ func NewDB(ctx context.Context, rootstore ds.Batching, options ...Option) (clien
 }

 func newDB(ctx context.Context, rootstore ds.Batching, options ...Option) (*db, error) {
-	log.Debug(ctx, "loading: internal datastores")
+	log.Debug(ctx, "Loading: internal datastores")
 	root := datastore.AsDSReaderWriter(rootstore)
 	multistore := datastore.MultiStoreFrom(root)
 	crdtFactory := crdt.DefaultFactory.WithStores(multistore)

-	log.Debug(ctx, "loading: schema manager")
+	log.Debug(ctx, "Loading: schema manager")
 	sm, err := schema.NewSchemaManager()
 	if err != nil {
 		return nil, err
 	}

-	log.Debug(ctx, "loading: query executor")
+	log.Debug(ctx, "Loading: query executor")
 	exec, err := planner.NewQueryExecutor(sm)
 	if err != nil {
 		return nil, err
@@ -147,7 +147,7 @@ func (db *db) initialize(ctx context.Context) error {
 	db.glock.Lock()
 	defer db.glock.Unlock()

-	log.Debug(ctx, "Checking if db has already been initialized...")
+	log.Debug(ctx, "Checking if DB has already been initialized...")
 	exists, err := db.systemstore().Has(ctx, ds.NewKey("init"))
 	if err != nil && err != ds.ErrNotFound {
 		return err
@@ -155,7 +155,7 @@ func (db *db) initialize(ctx context.Context) error {
 	// if we're loading an existing database, just load the schema
 	// and finish initialization
 	if exists {
-		log.Debug(ctx, "DB has already been initialized, continuing.")
+		log.Debug(ctx, "DB has already been initialized, continuing")
 		return db.loadSchema(ctx)
 	}

diff --git a/merkle/clock/clock.go b/merkle/clock/clock.go
index 77fe7531ff..7afb07c9d5 100644
--- a/merkle/clock/clock.go
+++ b/merkle/clock/clock.go
@@ -135,7 +135,7 @@ func (mc *MerkleClock) ProcessNode(
 	node ipld.Node,
 ) ([]cid.Cid, error) {
 	current := node.Cid()
-	log.Debug(ctx, "Running ProcessNode", logging.NewKV("Cid", current))
+	log.Debug(ctx, "Running ProcessNode", logging.NewKV("CID", current))
 	err := mc.crdt.Merge(ctx, delta, dshelp.MultihashToDsKey(current.Hash()).String())
 	if err != nil {
 		return nil, fmt.Errorf("error merging delta from %s : %w", current, err)
@@ -146,7 +146,7 @@ func (mc *MerkleClock) ProcessNode(
 	hasHeads := false
 	log.Debug(ctx, "Stepping through node links")
 	for _, l := range links {
-		log.Debug(ctx, "checking link", logging.NewKV("Name", l.Name), logging.NewKV("Cid", l.Cid))
+		log.Debug(ctx, "Checking link", logging.NewKV("Name", l.Name), logging.NewKV("CID", l.Cid))
 		if l.Name == "_head" {
 			hasHeads = true
 			break
@@ -194,7 +194,7 @@ func (mc *MerkleClock) ProcessNode(
 		if err != nil {
 			log.ErrorE(
 				ctx,
-				"error adding head (when root is new head)",
+				"Failure adding head (when root is a new head)",
 				err,
 				logging.NewKV("Root", root),
 			)
diff --git a/merkle/clock/heads.go b/merkle/clock/heads.go
index 5dc35d6c9d..60e72fa4c0 100644
--- a/merkle/clock/heads.go
+++ b/merkle/clock/heads.go
@@ -99,7 +99,7 @@ func (hh *heads) Replace(ctx context.Context, h, c cid.Cid, height uint64) error
 		ctx,
 		"Replacing DAG head",
 		logging.NewKV("Old", h),
-		logging.NewKV("Cid", c),
+		logging.NewKV("CID", c),
 		logging.NewKV("Height", height))
 	var store ds.Write = hh.store
 	var err error
@@ -133,7 +133,7 @@ func (hh *heads) Replace(ctx context.Context, h, c cid.Cid, height uint64) error
 func (hh *heads) Add(ctx context.Context, c cid.Cid, height uint64) error {
 	log.Info(ctx, "Adding new DAG head",
-		logging.NewKV("Cid", c),
+		logging.NewKV("CID", c),
 		logging.NewKV("Height", height))
 	return hh.write(ctx, hh.store, c, height)
 }
diff --git a/merkle/crdt/merklecrdt.go b/merkle/crdt/merklecrdt.go
index 3b7f51eb60..2db30a5296 100644
--- a/merkle/crdt/merklecrdt.go
+++ b/merkle/crdt/merklecrdt.go
@@ -105,7 +105,7 @@ func (base *baseMerkleCRDT) Broadcast(ctx context.Context, nd ipld.Node, delta c
 		ctx,
 		"Broadcasting new DAG node",
 		logging.NewKV("DocKey", dockey),
-		logging.NewKV("Cid", c),
+		logging.NewKV("CID", c),
 	)
 	// we dont want to wait around for the broadcast
 	go func() {
@@ -122,7 +122,7 @@ func (base *baseMerkleCRDT) Broadcast(ctx context.Context, nd ipld.Node, delta c
 				"Failed to broadcast MerkleCRDT update",
 				err,
 				logging.NewKV("DocKey", dockey),
-				logging.NewKV("Cid", c),
+				logging.NewKV("CID", c),
 			)
 		}
 	}()
diff --git a/net/client.go b/net/client.go
index 8c00e9fba6..870ffedaaf 100644
--- a/net/client.go
+++ b/net/client.go
@@ -42,7 +42,7 @@ func (s *server) pushLog(ctx context.Context, lg core.Log, pid peer.ID) error {
 		ctx,
 		"Preparing pushLog request",
 		logging.NewKV("DocKey", dockey),
-		logging.NewKV("Cid", lg.Cid),
+		logging.NewKV("CID", lg.Cid),
 		logging.NewKV("SchemaId", lg.SchemaID))

 	body := &pb.PushLogRequest_Body{
@@ -60,8 +60,8 @@ func (s *server) pushLog(ctx context.Context, lg core.Log, pid peer.ID) error {
 	log.Debug(
 		ctx, "Pushing log",
 		logging.NewKV("DocKey", dockey),
-		logging.NewKV("Cid", lg.Cid),
-		logging.NewKV("Pid", pid))
+		logging.NewKV("CID", lg.Cid),
+		logging.NewKV("PID", pid))

 	client, err := s.dial(pid) // grpc dial over p2p stream
 	if err != nil {
diff --git a/net/dag.go b/net/dag.go
index 4823108e4d..3a66ab880f 100644
--- a/net/dag.go
+++ b/net/dag.go
@@ -84,9 +84,9 @@ func (p *Peer) dagWorker() {
 	for job := range p.jobQueue {
 		log.Debug(
 			p.ctx,
-			"Starting new job from dag queue",
+			"Starting new job from DAG queue",
 			logging.NewKV("DocKey", job.dockey),
-			logging.NewKV("Cid", job.node.Cid()),
+			logging.NewKV("CID", job.node.Cid()),
 		)

 		select {
@@ -113,7 +113,7 @@ func (p *Peer) dagWorker() {
 					"Error processing log",
 					err,
 					logging.NewKV("DocKey", job.dockey),
-					logging.NewKV("Cid", job.node.Cid()),
+					logging.NewKV("CID", job.node.Cid()),
 				)
 				job.session.Done()
 				continue
diff --git a/net/peer.go b/net/peer.go
index 45bac5394c..472e114c7a 100644
--- a/net/peer.go
+++ b/net/peer.go
@@ -130,7 +130,7 @@ func (p *Peer) Start() error {
 		pb.RegisterServiceServer(p.p2pRPC, p.server)
 		if err := p.p2pRPC.Serve(p2plistener); err != nil && !errors.Is(err, grpc.ErrServerStopped) {
-			log.FatalE(p.ctx, "Fatal p2p rpc serve error", err)
+			log.FatalE(p.ctx, "Fatal P2P RPC server error", err)
 		}
 	}()

@@ -187,7 +187,7 @@ func (p *Peer) handleBroadcastLoop() {
 		} else if msg.Priority > 1 {
 			err = p.handleDocUpdateLog(msg)
 		} else {
-			log.Warn(p.ctx, "Skipping log with invalid priority of 0", logging.NewKV("Cid", msg.Cid))
+			log.Warn(p.ctx, "Skipping log with invalid priority of 0", logging.NewKV("CID", msg.Cid))
 		}

 		if err != nil {
@@ -211,7 +211,7 @@ func (p *Peer) RegisterNewDocument(
 	block, err := p.db.Blockstore().Get(ctx, c)
 	if err != nil {
-		log.ErrorE(p.ctx, "Failed to get document cid", err)
+		log.ErrorE(p.ctx, "Failed to get document CID", err)
 		return err
 	}

@@ -343,7 +343,7 @@ func (p *Peer) AddReplicator(
 				"Failed to get heads",
 				err,
 				logging.NewKV("DocKey", dockey),
-				logging.NewKV("Pid", pid),
+				logging.NewKV("PID", pid),
 				logging.NewKV("Collection", collectionName))
 			continue
 		}
@@ -352,8 +352,8 @@ func (p *Peer) AddReplicator(
 			blk, err := txn.DAGstore().Get(ctx, c)
 			if err != nil {
 				log.ErrorE(p.ctx, "Failed to get block", err,
-					logging.NewKV("Cid", c),
-					logging.NewKV("Pid", pid),
+					logging.NewKV("CID", c),
+					logging.NewKV("PID", pid),
 					logging.NewKV("Collection", collectionName))
 				continue
 			}
@@ -361,7 +361,7 @@ func (p *Peer) AddReplicator(
 			// @todo: remove encode/decode loop for core.Log data
 			nd, err := dag.DecodeProtobuf(blk.RawData())
 			if err != nil {
-				log.ErrorE(p.ctx, "Failed to decode protobuf", err, logging.NewKV("Cid", c))
+				log.ErrorE(p.ctx, "Failed to decode protobuf", err, logging.NewKV("CID", c))
 				continue
 			}

@@ -377,8 +377,8 @@ func (p *Peer) AddReplicator(
 					p.ctx,
 					"Failed to replicate log",
 					err,
-					logging.NewKV("Cid", c),
-					logging.NewKV("Pid", pid),
+					logging.NewKV("CID", c),
+					logging.NewKV("PID", pid),
 				)
 			}
 		}
@@ -409,7 +409,7 @@ func (p *Peer) handleDocUpdateLog(lg core.Log) error {
 		p.ctx,
 		"Preparing pubsub pushLog request from broadcast",
 		logging.NewKV("DocKey", dockey),
-		logging.NewKV("Cid", lg.Cid),
+		logging.NewKV("CID", lg.Cid),
 		logging.NewKV("SchemaId", lg.SchemaID))

 	body := &pb.PushLogRequest_Body{
@@ -444,7 +444,7 @@ func (p *Peer) pushLogToReplicators(ctx context.Context, lg core.Log) {
 					"Failed pushing log",
 					err,
 					logging.NewKV("DocKey", lg.DocKey),
-					logging.NewKV("Cid", lg.Cid),
+					logging.NewKV("CID", lg.Cid),
 					logging.NewKV("PeerId", peerID))
 			}
 		}(pid)
@@ -462,7 +462,7 @@ func stopGRPCServer(ctx context.Context, server *grpc.Server) {
 	select {
 	case <-timer.C:
 		server.Stop()
-		log.Warn(ctx, "peer GRPC server was shutdown ungracefully")
+		log.Warn(ctx, "Peer gRPC server was shut down ungracefully")
 	case <-stopped:
 		timer.Stop()
 	}
diff --git a/net/process.go b/net/process.go
index ce31fd0203..c63af561d8 100644
--- a/net/process.go
+++ b/net/process.go
@@ -72,9 +72,9 @@ func (p *Peer) processLog(
 	log.Debug(
 		ctx,
-		"Processing push log request",
+		"Processing PushLog request",
 		logging.NewKV("DocKey", dockey),
-		logging.NewKV("Cid", c),
+		logging.NewKV("CID", c),
 	)

 	height := delta.GetPriority()
@@ -183,17 +183,17 @@ func (p *Peer) handleChildBlocks(
 		// get object
 		cNode, err := getter.Get(ctx, c)
 		if err != nil {
-			log.ErrorE(ctx, "Failed to get node", err, logging.NewKV("Cid", c))
+			log.ErrorE(ctx, "Failed to get node", err, logging.NewKV("CID", c))
 			continue
 		}

 		log.Debug(
 			ctx,
-			"Submitting new job to dag queue",
+			"Submitting new job to DAG queue",
 			logging.NewKV("Collection", col.Name()),
 			logging.NewKV("DocKey", dockey),
 			logging.NewKV("Field", fieldName),
-			logging.NewKV("Cid", cNode.Cid()))
+			logging.NewKV("CID", cNode.Cid()))

 		session.Add(1)
 		job := &dagJob{
diff --git a/net/server.go b/net/server.go
index e56e5c588c..d32aeec20e 100644
--- a/net/server.go
+++ b/net/server.go
@@ -66,10 +66,10 @@ func newServer(p *Peer, db client.DB, opts ...grpc.DialOption) (*server, error)
 	s.opts = append(defaultOpts, opts...)
 	if s.peer.ps != nil {
 		// Get all DocKeys across all collections in the DB
-		log.Debug(p.ctx, "Getting all existing dockeys...")
+		log.Debug(p.ctx, "Getting all existing DocKeys...")
 		keyResults, err := s.listAllDocKeys()
 		if err != nil {
-			return nil, fmt.Errorf("Failed to get dockeys for pubsub topic registration: %w", err)
+			return nil, fmt.Errorf("Failed to get DocKeys for pubsub topic registration: %w", err)
 		}

 		i := 0
@@ -124,7 +124,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL
 	if err != nil {
 		return nil, err
 	}
-	log.Debug(ctx, "Received a pushLog request", logging.NewKV("Pid", pid))
+	log.Debug(ctx, "Received a PushLog request", logging.NewKV("PID", pid))

 	// parse request object
 	cid := req.Body.Cid.Cid
@@ -156,10 +156,10 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL
 	if err != nil {
 		log.ErrorE(
 			ctx,
-			"Failed to process push log node",
+			"Failed to process PushLog node",
 			err,
 			logging.NewKV("DocKey", docKey),
-			logging.NewKV("Cid", cid),
+			logging.NewKV("CID", cid),
 		)
 	}

@@ -169,13 +169,13 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL
 			ctx,
 			"Handling children for log",
 			logging.NewKV("NChildren", len(cids)),
-			logging.NewKV("Cid", cid),
+			logging.NewKV("CID", cid),
 		)
 		var session sync.WaitGroup
 		s.peer.handleChildBlocks(&session, col, docKey, "", nd, cids, getter)
 		session.Wait()
 	} else {
-		log.Debug(ctx, "No more children to process for log", logging.NewKV("Cid", cid))
+		log.Debug(ctx, "No more children to process for log", logging.NewKV("CID", cid))
 	}

 	return &pb.PushLogReply{}, nil
@@ -267,7 +267,7 @@ func (s *server) publishLog(ctx context.Context, dockey string, req *pb.PushLogR
 	log.Debug(
 		ctx,
 		"Published log",
-		logging.NewKV("Cid", req.Body.Cid.Cid),
+		logging.NewKV("CID", req.Body.Cid.Cid),
 		logging.NewKV("DocKey", dockey),
 	)
 	return nil
@@ -291,8 +291,8 @@ func (s *server) pubSubMessageHandler(from libpeer.ID, topic string, msg []byte)
 		Addr: addr{from},
 	})
 	if _, err := s.PushLog(ctx, req); err != nil {
-		log.ErrorE(ctx, "failed pushing log for doc", err, logging.NewKV("Topic", topic))
-		return nil, fmt.Errorf("failed pushing log for doc %s: %w", topic, err)
+		log.ErrorE(ctx, "Failed pushing log for doc", err, logging.NewKV("Topic", topic))
+		return nil, fmt.Errorf("Failed pushing log for doc %s: %w", topic, err)
 	}
 	return nil, nil
 }
diff --git a/query/graphql/planner/type_join.go b/query/graphql/planner/type_join.go
index 10fe851d50..717e63fe6f 100644
--- a/query/graphql/planner/type_join.go
+++ b/query/graphql/planner/type_join.go
@@ -326,7 +326,7 @@ func (n *typeJoinOne) valuesPrimary(doc map[string]interface{}) map[string]inter
 	// re-initialize the sub type plan
 	if err := n.subType.Init(); err != nil {
-		log.ErrorE(n.p.ctx, "Sub-type initalization error at scan node reset", err)
+		log.ErrorE(n.p.ctx, "Sub-type initialization error at scan node reset", err)
 		return doc
 	}

@@ -336,7 +336,7 @@ func (n *typeJoinOne) valuesPrimary(doc map[string]interface{}) map[string]inter
 	next, err := n.subType.Next()
 	if err != nil {
-		log.ErrorE(n.p.ctx, "Sub-type initalization error at scan node reset", err)
+		log.ErrorE(n.p.ctx, "Sub-type initialization error at scan node reset", err)
 		return doc
 	}
diff --git a/query/graphql/schema/generate_test.go b/query/graphql/schema/generate_test.go
index a50d68eb94..98f7424d9f 100644
--- a/query/graphql/schema/generate_test.go
+++ b/query/graphql/schema/generate_test.go
@@ -796,7 +796,7 @@ func runTestConfigForbuildTypesFromASTSuite(t *testing.T, g *Generator, schema s
 	assert.Equal(t, objDef.Name(), myObjectActual.Name(), "Mismatched object names from buildTypesFromAST")
 	log.Info(
 		ctx,
-		"expected vs actual objects",
+		"Expected vs actual objects",
 		logging.NewKV("DefinitionFields", objDef.Fields()),
 		logging.NewKV("ActualFields", myObjectActual.Fields()))