
Commit 844e557
fix a missing residual needed at the topmost resolution in the unet
lucidrains committed Jun 16, 2022
1 parent 8b30be8 commit 844e557
Showing 2 changed files with 5 additions and 5 deletions.
8 changes: 4 additions & 4 deletions denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
@@ -243,7 +243,7 @@ def __init__(

self.channels = channels

- init_dim = default(init_dim, dim // 3 * 2)
+ init_dim = default(init_dim, dim)
self.init_conv = nn.Conv2d(channels, init_dim, 7, padding = 3)

dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
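
For context, init_dim feeds the dims list on the line above, which is zipped into the (dim_in, dim_out) pairs used by the down and up paths. A minimal sketch of the new default's effect, assuming illustrative values dim = 64 and dim_mults = (1, 2, 4, 8) (neither is from this commit; the zip into in_out follows the code adjacent to this hunk):

    dim = 64
    dim_mults = (1, 2, 4, 8)

    init_dim = dim                                         # new default; the old default dim // 3 * 2 gave 42
    dims = [init_dim, *map(lambda m: dim * m, dim_mults)]  # [64, 64, 128, 256, 512]
    in_out = list(zip(dims[:-1], dims[1:]))                # [(64, 64), (64, 128), (128, 256), (256, 512)]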
@@ -288,8 +288,8 @@ def __init__(
self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))
self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)

- for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])):
-     is_last = ind >= (num_resolutions - 1)
+ for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
+     is_last = ind == (len(in_out) - 1)

self.ups.append(nn.ModuleList([
block_klass(dim_out * 2, dim_in, time_emb_dim = time_dim),
@@ -324,7 +324,7 @@ def forward(self, x, time):
x = self.mid_block2(x, t)

for block1, block2, attn, upsample in self.ups:
- x = torch.cat((x, h.pop()), dim=1)
+ x = torch.cat((x, h.pop()), dim = 1)
x = block1(x, t)
x = block2(x, t)
x = attn(x)
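To see why iterating over the full reversed(in_out) restores the topmost residual, here is an illustrative check (not part of the commit), reusing the in_out pairs from the sketch above. Each downsampling stage pushes one skip connection onto h, so the up path needs one stage per pair for every h.pop() to find a partner:

    in_out = [(64, 64), (64, 128), (128, 256), (256, 512)]

    old_up_stages = list(reversed(in_out[1:]))  # 3 stages: the topmost (64, 64) pair was skipped,
                                                # so the highest-resolution skip in h was never concatenated
    new_up_stages = list(reversed(in_out))      # 4 stages: one h.pop() per skip pushed on the way down
    assert len(new_up_stages) == len(in_out)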
2 changes: 1 addition & 1 deletion setup.py
@@ -3,7 +3,7 @@
setup(
name = 'denoising-diffusion-pytorch',
packages = find_packages(),
- version = '0.18.4',
+ version = '0.19.1',
license='MIT',
description = 'Denoising Diffusion Probabilistic Models - Pytorch',
author = 'Phil Wang',
