# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>
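
# Typical invocations, assuming this file is saved as minidiffusion.py
# (the file name is an assumption; the flags come from the parser below):
#
#   python minidiffusion.py --data spiral
#   python minidiffusion.py --data mnist --nb_epochs 20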
import math, argparse

import matplotlib.pyplot as plt

import torch, torchvision

from torch import nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

######################################################################
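
# Toy densities to learn. Each sampler takes a number of samples nb and
# returns a tensor of shape (nb, 1) or (nb, 2), except sample_mnist,
# which returns (nb, 1, 28, 28) images.
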
def sample_gaussian_mixture(nb):
    p, std = 0.3, 0.2  # mixture weight and component std (assumed values)
    result = torch.randn(nb, 1) * std
    result = result + torch.sign(torch.rand(result.size()) - p) / 2
    return result

def sample_ramp(nb):  # name assumed; the min of two uniforms has a ramp density
    result = torch.min(torch.rand(nb, 1), torch.rand(nb, 1))
    return result

def sample_two_discs(nb):
    a = torch.rand(nb) * math.pi * 2
    b = torch.rand(nb).sqrt()
    q = (torch.rand(nb) <= 0.5).long()
    b = b * (0.3 + 0.2 * q)
    result = torch.empty(nb, 2)
    result[:, 0] = a.cos() * b - 0.5 + q
    result[:, 1] = a.sin() * b - 0.5 + q
    return result

def sample_disc_grid(nb):
    a = torch.rand(nb) * math.pi * 2
    b = torch.rand(nb).sqrt()
    N = 4  # grid side (assumed value)
    q = (torch.randint(N, (nb,)) - (N - 1) / 2) / ((N - 1) / 2)
    r = (torch.randint(N, (nb,)) - (N - 1) / 2) / ((N - 1) / 2)
    b = b * 0.1  # disc radius (assumed value)
    result = torch.empty(nb, 2)
    result[:, 0] = a.cos() * b + q
    result[:, 1] = a.sin() * b + r
    return result

def sample_spiral(nb):
    u = torch.rand(nb)
    rho = u * 0.65 + 0.25 + torch.rand(nb) * 0.15
    theta = u * math.pi * 3
    result = torch.empty(nb, 2)
    result[:, 0] = theta.cos() * rho
    result[:, 1] = theta.sin() * rho
    return result

def sample_mnist(nb):
    train_set = torchvision.datasets.MNIST(root = './data/', train = True, download = True)
    result = train_set.data[:nb].to(device).view(-1, 1, 28, 28).float()
    return result

samplers = {
    'gaussian_mixture': sample_gaussian_mixture,
    'ramp': sample_ramp,
    'two_discs': sample_two_discs,
    'disc_grid': sample_disc_grid,
    'spiral': sample_spiral,
    'mnist': sample_mnist,
}

######################################################################

parser = argparse.ArgumentParser(
    description = '''A minimal implementation of Jonathan Ho, Ajay Jain, Pieter Abbeel
"Denoising Diffusion Probabilistic Models" (2020)
https://arxiv.org/abs/2006.11239''',

    formatter_class = argparse.ArgumentDefaultsHelpFormatter
)

parser.add_argument('--seed',
                    type = int, default = 0,
                    help = 'Random seed, < 0 is no seeding')

parser.add_argument('--nb_epochs',
                    type = int, default = 100,
                    help = 'How many epochs')

parser.add_argument('--batch_size',
                    type = int, default = 25,
                    help = 'Batch size')

parser.add_argument('--nb_samples',
                    type = int, default = 25000,
                    help = 'Number of training examples')

parser.add_argument('--learning_rate',
                    type = float, default = 1e-3,
                    help = 'Learning rate')

parser.add_argument('--ema_decay',
                    type = float, default = 0.9999,
                    help = 'EMA decay, < 0 is no EMA')

data_list = ', '.join( [ str(k) for k in samplers ])

parser.add_argument('--data',
                    type = str, default = 'gaussian_mixture',
                    help = f'Toy data-set to use: {data_list}')

args = parser.parse_args()

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################
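
# Exponential moving average of the model parameters. Sampling with an
# EMA of the weights rather than the raw weights is standard practice for
# diffusion models (Ho et al. use a decay of 0.9999, the default here).
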
class EMA:
    def __init__(self, model, decay):
        self.model = model
        self.decay = decay
        if self.decay < 0: return
        self.ema = { }
        with torch.no_grad():
            for p in model.parameters():
                self.ema[p] = p.clone()

    def step(self):
        if self.decay < 0: return
        with torch.no_grad():
            for p in self.model.parameters():
                self.ema[p].copy_(self.decay * self.ema[p] + (1 - self.decay) * p)

    def copy_to_model(self):
        if self.decay < 0: return
        with torch.no_grad():
            for p in self.model.parameters():
                p.copy_(self.ema[p])

######################################################################
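
# A plain convolutional network for the image case. The extra input
# channel carries the diffusion timestep, broadcast as a constant plane.
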
class ConvNet(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        ks, nc = 5, 64  # kernel size and channel width (assumed values)
        self.core = nn.Sequential(
            nn.Conv2d(in_channels, nc, ks, padding = ks//2),
            nn.ReLU(),
            nn.Conv2d(nc, nc, ks, padding = ks//2),
            nn.ReLU(),
            nn.Conv2d(nc, nc, ks, padding = ks//2),
            nn.ReLU(),
            nn.Conv2d(nc, nc, ks, padding = ks//2),
            nn.ReLU(),
            nn.Conv2d(nc, nc, ks, padding = ks//2),
            nn.ReLU(),
            nn.Conv2d(nc, out_channels, ks, padding = ks//2),
        )

    def forward(self, x):
        return self.core(x)

######################################################################

try:
    train_input = samplers[args.data](args.nb_samples).to(device)
except KeyError:
    print(f'unknown data {args.data}')
    exit(1)

train_mean, train_std = train_input.mean(), train_input.std()

######################################################################

if train_input.dim() == 2:
    nh = 256  # hidden width; width and depth of the MLP are assumed values
    model = nn.Sequential(
        nn.Linear(train_input.size(1) + 1, nh),  # +1 input for the timestep
        nn.ReLU(),
        nn.Linear(nh, nh),
        nn.ReLU(),
        nn.Linear(nh, nh),
        nn.ReLU(),
        nn.Linear(nh, train_input.size(1)),
    )
elif train_input.dim() == 4:
    model = ConvNet(train_input.size(1) + 1, train_input.size(1))

model.to(device)

print(f'nb_parameters {sum([ p.numel() for p in model.parameters() ])}')

######################################################################
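
# DDPM noise schedule. With alpha_t = 1 - beta_t and alpha_bar_t the
# cumulative product of the alpha_s for s <= t, the forward process has
# the closed form
#
#   q(x_t | x_0) = N(sqrt(alpha_bar_t) x_0, (1 - alpha_bar_t) I)
#
# alpha_bar is computed below as a cumulative sum of logs, which is
# equivalent to a cumulative product.
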
T = 1000
beta = torch.linspace(1e-4, 0.02, T, device = device)
alpha = 1 - beta
alpha_bar = alpha.log().cumsum(0).exp()
sigma = beta.sqrt()  # sigma_t^2 = beta_t, one of the two choices in the paper

ema = EMA(model, decay = args.ema_decay)

for k in range(args.nb_epochs):

    acc_loss = 0
    optimizer = torch.optim.Adam(model.parameters(), lr = args.learning_rate)
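
    # One step of Algorithm 1 of the paper per mini-batch: draw a timestep
    # t per sample, corrupt x0 with the closed-form forward process, and
    # regress the noise eps from the corrupted input and the timestep,
    # rescaled to [-0.5, 0.5] and appended as an extra input dimension.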
    for x0 in train_input.split(args.batch_size):
        x0 = (x0 - train_mean) / train_std
        t = torch.randint(T, (x0.size(0),) + (1,) * (x0.dim() - 1), device = x0.device)
        eps = torch.randn_like(x0)
        input = torch.sqrt(alpha_bar[t]) * x0 + torch.sqrt(1 - alpha_bar[t]) * eps
        input = torch.cat((input, t.expand_as(x0[:,:1]) / (T - 1) - 0.5), 1)
        loss = (eps - model(input)).pow(2).mean()
        acc_loss += loss.item() * x0.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        ema.step()

    print(f'{k} {acc_loss / train_input.size(0)}')

ema.copy_to_model()

######################################################################
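
# Algorithm 2 of the paper: start from x_T ~ N(0, I) and iterate the
# learned reverse step
#
#   x_{t-1} = 1/sqrt(alpha_t)
#             * (x_t - (1 - alpha_t)/sqrt(1 - alpha_bar_t) * eps_theta(x_t, t))
#             + sigma_t z
#
# with z ~ N(0, I) for t > 0 and z = 0 at the final step.
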
def generate(size, model):
    with torch.no_grad():
        x = torch.randn(size, device = device)

        for t in range(T-1, -1, -1):
            z = torch.zeros_like(x) if t == 0 else torch.randn_like(x)
            input = torch.cat((x, torch.full_like(x[:,:1], t / (T - 1) - 0.5)), 1)
            x = 1/torch.sqrt(alpha[t]) \
                * (x - (1-alpha[t]) / torch.sqrt(1-alpha_bar[t]) * model(input)) \
                + sigma[t] * z

        x = x * train_std + train_mean

        return x

######################################################################
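
# Visualization of the result: a histogram for 1d data, a scatter plot
# for 2d data, an image grid for MNIST.
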
if train_input.dim() == 2:
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)

    if train_input.size(1) == 1:

        x = generate((10000, 1), model)

        ax.set_xlim(-1.25, 1.25)
        ax.spines.right.set_visible(False)
        ax.spines.top.set_visible(False)

        d = train_input.flatten().detach().to('cpu').numpy()
        ax.hist(d, 25, (-1, 1),
                density = True,
                histtype = 'stepfilled', color = 'lightblue', label = 'Train')

        d = x.flatten().detach().to('cpu').numpy()
        ax.hist(d, 25, (-1, 1),
                density = True,
                histtype = 'step', color = 'red', label = 'Synthesis')

        ax.legend(frameon = False, loc = 2)

    elif train_input.size(1) == 2:

        x = generate((1000, 2), model)

        ax.set_xlim(-1.5, 1.5)
        ax.set_ylim(-1.5, 1.5)
        ax.spines.right.set_visible(False)
        ax.spines.top.set_visible(False)

        d = x.detach().to('cpu').numpy()
        ax.scatter(d[:, 0], d[:, 1],
                   s = 2.0, color = 'red', label = 'Synthesis')

        d = train_input[:x.size(0)].detach().to('cpu').numpy()
        ax.scatter(d[:, 0], d[:, 1],
                   s = 2.0, color = 'gray', label = 'Train')

        ax.legend(frameon = False, loc = 2)

    filename = f'diffusion_{args.data}.pdf'
    print(f'saving {filename}')
    fig.savefig(filename, bbox_inches = 'tight')

    if hasattr(plt.get_current_fig_manager(), 'window'):
        plt.get_current_fig_manager().window.setGeometry(2, 2, 1024, 768)
        plt.show()

elif train_input.dim() == 4:

    x = generate((128,) + train_input.size()[1:], model)
    x = 1 - x.clamp(min = 0, max = 255) / 255  # map to [0, 1] and invert for display
    torchvision.utils.save_image(x, f'diffusion_{args.data}.png', nrow = 16, pad_value = 0.8)

######################################################################