# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

# Minimal implementation of Jonathan Ho, Ajay Jain, Pieter Abbeel
# "Denoising Diffusion Probabilistic Models" (2020)
# https://arxiv.org/abs/2006.11239
14 import matplotlib.pyplot as plt
18 ######################################################################
# NOTE(review): line-sampled fragment of an exponential-moving-average (EMA)
# helper class. The `class` header, the assignments of self.model / self.decay
# / self.ema, and the `torch.no_grad()` contexts are missing from this
# excerpt, and every line has its original source line number fused onto the
# front — restore from the original file before running.
# Constructor: takes a model and a decay factor, and (on lines visible below)
# clones every parameter into the shadow dictionary self.ema.
21 def __init__(self, model, decay = 0.9999):
26 for p in model.parameters():
27 self.ema[p] = p.clone()
# Update step of the average: ema <- decay * ema + (1 - decay) * p,
# performed in place on each shadow tensor.
31 for p in self.model.parameters():
32 self.ema[p].copy_(self.decay * self.ema[p] + (1 - self.decay) * p)
# Third method: presumably copies the averaged weights back into the model;
# its `def` header and loop body line are not visible here — confirm against
# the original source.
36 for p in self.model.parameters():
39 ######################################################################
# Draws `nb` scalar samples from a two-mode Gaussian mixture: Gaussian noise
# of std `std`, shifted by +/- 0.5 according to the sign test below.
# NOTE(review): the lines defining `p` and `std` and the final
# `return result` are missing from this line-sampled excerpt, and the
# leading integers are fused source line numbers.
41 def sample_gaussian_mixture(nb):
43 result = torch.empty(nb, 1).normal_(0, std)
# sign(rand - p) is -1 with probability p and +1 otherwise, so each sample
# is moved to one of two modes at -0.5 / +0.5 (plus the Gaussian noise).
44 result = result + torch.sign(torch.rand(result.size()) - p) / 2
# Fragment of a 2D arc sampler: points on a half-circle (angle in [0, pi))
# with radius jittered in [0.7, 0.8). NOTE(review): the `def sample_arc(nb):`
# header and the `return result` line are not visible in this excerpt, and
# the leading integers are fused source line numbers.
48 theta = torch.rand(nb) * math.pi
49 rho = torch.rand(nb) * 0.1 + 0.7
50 result = torch.empty(nb, 2)
# Polar -> Cartesian conversion.
51 result[:, 0] = theta.cos() * rho
52 result[:, 1] = theta.sin() * rho
55 ######################################################################
# Training set: 1D two-mode Gaussian mixture by default; the 2D arc sampler
# is the commented-out alternative. NOTE(review): `nb_samples` is defined on
# a line not visible in this excerpt, and the leading integers are fused
# source line numbers.
60 train_input = sample_gaussian_mixture(nb_samples)
61 #train_input = sample_arc(nb_samples)
63 ######################################################################
# Denoiser network: the input is the noisy sample x_t concatenated with the
# normalized time step (hence the `+ 1` input feature); the output predicts
# the injected noise eps, so it has the data dimensionality.
# NOTE(review): the hidden activation layers, the definition of `nh`, and the
# closing parenthesis are missing from this excerpt; leading integers are
# fused source line numbers.
67 model = nn.Sequential(
68 nn.Linear(train_input.size(1) + 1, nh),
72 nn.Linear(nh, train_input.size(1)),
# Linear beta schedule over T steps, as in the DDPM paper, with
# alpha_bar[t] = prod_{s<=t} alpha_s computed as exp(cumsum(log(alpha))).
# NOTE(review): the lines defining `T` and `alpha = 1 - beta` are not
# visible in this excerpt; leading integers are fused source line numbers.
79 beta = torch.linspace(1e-4, 0.02, T)
81 alpha_bar = alpha.log().cumsum(0).exp()
# Training loop (Algorithm 1 of the DDPM paper): sample a random step t and
# noise eps per example, form x_t by the closed-form forward process, and
# regress the network output onto eps with an MSE loss.
# NOTE(review): `nb_epochs`, `batch_size`, `acc_loss` initialization, the
# `output = model(input)` line, and the backward()/optimizer.step() lines are
# missing from this line-sampled excerpt; leading integers are fused source
# line numbers.
86 for k in range(nb_epochs):
# NOTE(review): a fresh Adam is created every epoch, which resets its moment
# estimates — confirm against the original source whether this is intended.
89 optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)
91 for x0 in train_input.split(batch_size):
# One independent diffusion step t per sample of the batch, shape (B, 1).
92 t = torch.randint(T, (x0.size(0), 1))
93 eps = torch.randn(x0.size())
# Forward process: x_t = sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * eps
94 input = alpha_bar[t].sqrt() * x0 + (1 - alpha_bar[t]).sqrt() * eps
# The time step, rescaled to [-1, 1), is appended as an extra input feature.
95 input = torch.cat((input, 2 * t / T - 1), 1)
97 loss = (eps - output).pow(2).mean()
102 acc_loss += loss.item()
# Progress report every 10 epochs (prints the last batch loss).
106 if k%10 == 0: print(k, loss.item())
110 ######################################################################
# Generation (Algorithm 2 of the DDPM paper): start from pure Gaussian noise
# and iterate the learned reverse process from t = T-1 down to 0.
# NOTE(review): leading integers are fused source line numbers.
113 x = torch.randn(10000, train_input.size(1))
115 for t in range(T-1, -1, -1):
# No noise is injected at the final step (t == 0).
116 z = torch.zeros(x.size()) if t == 0 else torch.randn(x.size())
# Same time-conditioning feature as in training: 2 * t / T - 1 broadcast
# over a column of ones.
117 input = torch.cat((x, torch.ones(x.size(0), 1) * 2 * t / T - 1), 1)
# Posterior mean of the reverse step. NOTE(review): the `+ sigma * z`
# continuation of this statement is on a line not visible in this excerpt.
118 x = 1 / alpha[t].sqrt() * (x - (1 - alpha[t])/(1 - alpha_bar[t]).sqrt() * model(input)) \
121 ######################################################################
# Result plot: overlaid histograms (train vs. synthesis) in the 1D case,
# scatter plots in the 2D case. NOTE(review): the `fig = plt.figure(...)`
# line and at least one `ax.hist` keyword line are missing from this
# excerpt; leading integers are fused source line numbers.
125 ax = fig.add_subplot(1, 1, 1)
127 if train_input.size(1) == 1:
129 ax.set_xlim(-1.25, 1.25)
# Training distribution as a filled histogram over [-1, 1].
131 d = train_input.flatten().detach().numpy()
132 ax.hist(d, 25, (-1, 1),
134 histtype = 'stepfilled', color = 'lightblue', label = 'Train')
# Generated samples as an outline histogram on the same bins.
136 d = x.flatten().detach().numpy()
137 ax.hist(d, 25, (-1, 1),
139 histtype = 'step', color = 'red', label = 'Synthesis')
141 ax.legend(frameon = False, loc = 2)
143 elif train_input.size(1) == 2:
145 ax.set_xlim(-1.25, 1.25)
146 ax.set_ylim(-1.25, 1.25)
# Only the first 200 points of each set are scattered, to keep the plot
# readable.
149 d = train_input[:200].detach().numpy()
150 ax.scatter(d[:, 0], d[:, 1],
151 color = 'lightblue', label = 'Train')
153 d = x[:200].detach().numpy()
154 ax.scatter(d[:, 0], d[:, 1],
155 color = 'red', label = 'Synthesis')
157 ax.legend(frameon = False, loc = 2)
# Save the figure to disk and report the destination.
# Fix: the print statement emitted the literal text '(unknown)' instead of
# the file name defined on the line above; it now interpolates `filename`,
# matching the path actually passed to savefig below.
filename = 'diffusion.pdf'
print(f'saving {filename}')
fig.savefig(filename, bbox_inches='tight')

# Position/resize the interactive window.
# NOTE(review): `.window.setGeometry` exists only on Qt-style backends; this
# line will raise under a non-interactive backend (e.g. Agg) — consider
# guarding it with hasattr(). Left as-is pending confirmation of the
# intended backend.
plt.get_current_fig_manager().window.setGeometry(2, 2, 1024, 768)
166 ######################################################################