Skip to content

Commit

Permalink
Add a test and fix another torch.Tensor instance to use torch.tensor.
Browse files Browse the repository at this point in the history
In general, torch.Tensor is intended for uninitialized tensors. Because all of our tensors are created with values and their datatype is inferred (not explicit), we should use torch.tensor instead.
  • Loading branch information
edenlum committed Dec 5, 2023
1 parent cbace9c commit 69ec6ac
Show file tree
Hide file tree
Showing 2 changed files with 3 additions and 3 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -233,7 +233,7 @@ def _add_modules(self):
if node.type == BufferHolder:
self.get_submodule(node.name). \
register_buffer(node.name,
torch.Tensor(node.get_weights_by_keys(BUFFER)).to(get_working_device()))
torch.tensor(node.get_weights_by_keys(BUFFER)).to(get_working_device()))

# Add activation quantization modules if an activation holder is configured for this node
if node.is_activation_quantization_enabled() and self.get_activation_quantizer_holder is not None:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ def __init__(self):
self.conv1 = torch.nn.Conv2d(3, 3, kernel_size=1, stride=1)
self.scalars = [torch.tensor(i) for i in range(-5, 6)]

def forward(self, x, y):
def forward(self, x):
x = self.conv1(x)
for scalar in self.scalars:
x = x + scalar
Expand All @@ -38,7 +38,7 @@ def __init__(self, unit_test):
super().__init__(unit_test)

def create_inputs_shape(self):
    """Return the list of input shapes for the test network.

    ScalarTensorNet.forward now takes a single input ``x``, so exactly one
    shape is returned: ``[val_batch_size, 3, 32, 32]``.
    """
    # NOTE(review): the rendered diff left two consecutive returns here; the
    # first (two-input) one was stale residue from the old forward(self, x, y)
    # signature and made the intended single-input return unreachable.
    return [[self.val_batch_size, 3, 32, 32]]

def create_feature_network(self, input_shape):
    """Instantiate the network under test.

    ``input_shape`` is accepted for interface compatibility but not used —
    ScalarTensorNet fixes its own layer shapes internally.
    """
    network = ScalarTensorNet()
    return network

0 comments on commit 69ec6ac

Please sign in to comment.