diff --git a/torchrec/modules/fused_embedding_bag_collection.py b/torchrec/modules/fused_embedding_bag_collection.py
index eb014c357..28c923888 100644
--- a/torchrec/modules/fused_embedding_bag_collection.py
+++ b/torchrec/modules/fused_embedding_bag_collection.py
@@ -441,7 +441,6 @@ def __init__(
             self._emb_modules.append(emb_module)
             params: Dict[str, torch.Tensor] = {}
             for param_key, weight in emb_module.fused_optimizer.params.items():
-                # pyre-ignore
                 params[f"embedding_bags.{param_key}"] = weight
 
             optims.append(("", emb_module.fused_optimizer))
diff --git a/torchrec/optim/keyed.py b/torchrec/optim/keyed.py
index 5982eb399..772dccf07 100644
--- a/torchrec/optim/keyed.py
+++ b/torchrec/optim/keyed.py
@@ -222,7 +222,6 @@ def init_state(
                     # pyre-ignore [16]
                     t = t.to_sparse()
                 # pyre-ignore [41]
-                # pyre-fixme[16]: `ShardedTensor` has no attribute `grad`.
                 param.grad = torch.autograd.Variable(t)
         self.step(closure=None)
 