# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>
import math, argparse

import matplotlib.pyplot as plt

import torch, torchvision

from torch import nn
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print(f'device {device}')

######################################################################
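# Toy data samplers: each takes a sample count nb and returns a tensor
# of samples, with 1d densities shaped (nb, 1), 2d densities shaped
# (nb, 2), and MNIST digits shaped (nb, 1, 28, 28).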
def sample_gaussian_mixture(nb):
    p, std = 0.3, 0.2 # mixing weight and component std (assumed values)
    result = torch.randn(nb, 1) * std
    result = result + torch.sign(torch.rand(result.size()) - p) / 2
    return result

def sample_ramp(nb):
    result = torch.min(torch.rand(nb, 1), torch.rand(nb, 1))
    return result
def sample_two_discs(nb):
    a = torch.rand(nb) * math.pi * 2
    b = torch.rand(nb).sqrt()
    q = (torch.rand(nb) <= 0.5).long()
    b = b * (0.3 + 0.2 * q)
    result = torch.empty(nb, 2)
    result[:, 0] = a.cos() * b - 0.5 + q
    result[:, 1] = a.sin() * b - 0.5 + q
    return result
def sample_disc_grid(nb):
    a = torch.rand(nb) * math.pi * 2
    b = torch.rand(nb).sqrt()
    N = 4 # grid size (assumed value)
    q = (torch.randint(N, (nb,)) - (N - 1) / 2) / ((N - 1) / 2)
    r = (torch.randint(N, (nb,)) - (N - 1) / 2) / ((N - 1) / 2)
    b = b * 0.1 # disc radius (assumed value)
    result = torch.empty(nb, 2)
    result[:, 0] = a.cos() * b + q
    result[:, 1] = a.sin() * b + r
    return result
def sample_spiral(nb):
    u = torch.rand(nb)
    rho = u * 0.65 + 0.25 + torch.rand(nb) * 0.15
    theta = u * math.pi * 3
    result = torch.empty(nb, 2)
    result[:, 0] = theta.cos() * rho
    result[:, 1] = theta.sin() * rho
    return result
def sample_mnist(nb):
    train_set = torchvision.datasets.MNIST(root = './data/', train = True, download = True)
    result = train_set.data[:nb].to(device).view(-1, 1, 28, 28).float()
    return result
samplers = {
    'gaussian_mixture': sample_gaussian_mixture,
    'ramp': sample_ramp,
    'two_discs': sample_two_discs,
    'disc_grid': sample_disc_grid,
    'spiral': sample_spiral,
    'mnist': sample_mnist,
}
######################################################################
parser = argparse.ArgumentParser(
    description = '''A minimal implementation of Jonathan Ho, Ajay Jain, Pieter Abbeel
"Denoising Diffusion Probabilistic Models" (2020)
https://arxiv.org/abs/2006.11239''',

    formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--seed',
                    type = int, default = 0,
                    help = 'Random seed, < 0 is no seeding')
parser.add_argument('--nb_epochs',
                    type = int, default = 100,
                    help = 'How many epochs')
parser.add_argument('--batch_size',
                    type = int, default = 25,
                    help = 'Batch size')
parser.add_argument('--nb_samples',
                    type = int, default = 25000,
                    help = 'Number of training examples')
parser.add_argument('--learning_rate',
                    type = float, default = 1e-3,
                    help = 'Learning rate')
parser.add_argument('--ema_decay',
                    type = float, default = 0.9999,
                    help = 'EMA decay, < 0 is no EMA')
data_list = ', '.join( [ str(k) for k in samplers ])
parser.add_argument('--data',
                    type = str, default = 'gaussian_mixture',
                    help = f'Toy data-set to use: {data_list}')
args = parser.parse_args()
if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
######################################################################
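# Exponential moving average of the model parameters. After every
# optimizer update, each shadow parameter is moved as
# ema <- decay * ema + (1 - decay) * p, and copy_to_model() writes the
# averaged values back into the model before sampling. A negative decay
# disables the mechanism.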
class EMA:
    def __init__(self, model, decay):
        self.model = model
        self.decay = decay
        if self.decay < 0: return
        self.ema = { }
        with torch.no_grad():
            for p in model.parameters():
                self.ema[p] = p.clone()

    def step(self):
        if self.decay < 0: return
        with torch.no_grad():
            for p in self.model.parameters():
                self.ema[p].copy_(self.decay * self.ema[p] + (1 - self.decay) * p)

    def copy_to_model(self):
        if self.decay < 0: return
        with torch.no_grad():
            for p in self.model.parameters():
                p.copy_(self.ema[p])
######################################################################
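# Denoiser used for image data: a plain stack of same-resolution
# convolutions that takes the noisy image plus one extra channel
# holding the time step, and returns the predicted noise.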
class ConvNet(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()

        ks, nc = 5, 64 # kernel size and channel count (assumed values)

        self.core = nn.Sequential(
            nn.Conv2d(in_channels, nc, ks, padding = ks//2),
            nn.ReLU(),
            nn.Conv2d(nc, nc, ks, padding = ks//2),
            nn.ReLU(),
            nn.Conv2d(nc, nc, ks, padding = ks//2),
            nn.ReLU(),
            nn.Conv2d(nc, nc, ks, padding = ks//2),
            nn.ReLU(),
            nn.Conv2d(nc, nc, ks, padding = ks//2),
            nn.ReLU(),
            nn.Conv2d(nc, out_channels, ks, padding = ks//2),
        )

    def forward(self, x):
        return self.core(x)
######################################################################
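# Data: draw the training set with the requested sampler and keep its
# mean and standard deviation for normalization.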
try:
    train_input = samplers[args.data](args.nb_samples).to(device)
except KeyError:
    print(f'unknown data {args.data}')
    exit(1)
train_mean, train_std = train_input.mean(), train_input.std()

######################################################################
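# Model: the noise predictor is a small MLP for the low-dimensional toy
# densities, with the time step appended as an extra input coordinate,
# and the ConvNet above for images, with the time step appended as an
# extra channel.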
if train_input.dim() == 2:

    nh = 256 # hidden layer width (assumed value)

    model = nn.Sequential(
        nn.Linear(train_input.size(1) + 1, nh),
        nn.ReLU(),
        nn.Linear(nh, nh),
        nn.ReLU(),
        nn.Linear(nh, nh),
        nn.ReLU(),
        nn.Linear(nh, train_input.size(1)),
    )

elif train_input.dim() == 4:

    model = ConvNet(train_input.size(1) + 1, train_input.size(1))

model.to(device)
print(f'nb_parameters {sum([ p.numel() for p in model.parameters() ])}')

######################################################################
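# Sampling (algorithm 2 of Ho et al.): starting from x_T ~ N(0, I),
# iterate
#
#   x_{t-1} = 1/sqrt(alpha_t) * (x_t - (1-alpha_t)/sqrt(1-alpha_bar_t) * eps_theta(x_t, t)) + sigma_t * z
#
# with z ~ N(0, I) for t > 0 and z = 0 at the last step, then undo the
# training normalization.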
def generate(size, alpha, alpha_bar, sigma, model):
    with torch.no_grad():
        x = torch.randn(size, device = device)

        for t in range(T-1, -1, -1):
            z = torch.zeros_like(x) if t == 0 else torch.randn_like(x)
            input = torch.cat((x, torch.full_like(x[:,:1], t / (T - 1) - 0.5)), 1)
            x = 1/torch.sqrt(alpha[t]) \
                * (x - (1-alpha[t]) / torch.sqrt(1-alpha_bar[t]) * model(input)) \
                + sigma[t] * z

        x = x * train_std + train_mean

        return x
######################################################################
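# Training (algorithm 1 of Ho et al.): beta follows a linear schedule,
# alpha_t = 1 - beta_t, and alpha_bar_t is the cumulative product of the
# alphas (computed as exp of a cumulative sum of logs). For each batch a
# time step t is drawn uniformly, the corrupted sample is
#
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,  eps ~ N(0, I)
#
# and the model is trained to predict eps with a quadratic loss.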
T = 1000 # number of diffusion steps (value assumed, as in the DDPM paper)
beta = torch.linspace(1e-4, 0.02, T, device = device)
alpha = 1 - beta
alpha_bar = alpha.log().cumsum(0).exp()
sigma = beta.sqrt()

ema = EMA(model, decay = args.ema_decay)
for k in range(args.nb_epochs):

    acc_loss = 0
    optimizer = torch.optim.Adam(model.parameters(), lr = args.learning_rate)

    for x0 in train_input.split(args.batch_size):
        x0 = (x0 - train_mean) / train_std
        t = torch.randint(T, (x0.size(0),) + (1,) * (x0.dim() - 1), device = x0.device)
        eps = torch.randn_like(x0)
        input = torch.sqrt(alpha_bar[t]) * x0 + torch.sqrt(1 - alpha_bar[t]) * eps
        input = torch.cat((input, t.expand_as(x0[:,:1]) / (T - 1) - 0.5), 1)
        loss = (eps - model(input)).pow(2).mean()
        acc_loss += loss.item() * x0.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        ema.step()

    print(f'{k} {acc_loss / train_input.size(0)}')

ema.copy_to_model()
######################################################################
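# Results: 1d densities are shown as histograms of training vs
# synthesized samples, 2d densities as overlaid scatter plots, and
# MNIST syntheses are saved as a grid of images.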
if train_input.dim() == 2:

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    if train_input.size(1) == 1:
        x = generate((10000, 1), alpha, alpha_bar, sigma, model)

        ax.set_xlim(-1.25, 1.25)
        ax.spines.right.set_visible(False)
        ax.spines.top.set_visible(False)

        d = train_input.flatten().detach().to('cpu').numpy()
        ax.hist(d, 25, (-1, 1),
                density = True,
                histtype = 'stepfilled', color = 'lightblue', label = 'Train')

        d = x.flatten().detach().to('cpu').numpy()
        ax.hist(d, 25, (-1, 1),
                density = True,
                histtype = 'step', color = 'red', label = 'Synthesis')

        ax.legend(frameon = False, loc = 2)
    elif train_input.size(1) == 2:

        x = generate((1000, 2), alpha, alpha_bar, sigma, model)

        ax.set_xlim(-1.5, 1.5)
        ax.set_ylim(-1.5, 1.5)
        ax.spines.right.set_visible(False)
        ax.spines.top.set_visible(False)

        d = x.detach().to('cpu').numpy()
        ax.scatter(d[:, 0], d[:, 1],
                   s = 2.0, color = 'red', label = 'Synthesis')

        d = train_input[:x.size(0)].detach().to('cpu').numpy()
        ax.scatter(d[:, 0], d[:, 1],
                   s = 2.0, color = 'gray', label = 'Train')

        ax.legend(frameon = False, loc = 2)
    filename = f'diffusion_{args.data}.pdf'
    print(f'saving {filename}')
    fig.savefig(filename, bbox_inches='tight')
    if hasattr(plt.get_current_fig_manager(), 'window'):
        plt.get_current_fig_manager().window.setGeometry(2, 2, 1024, 768)
        plt.show()
elif train_input.dim() == 4:

    x = generate((128,) + train_input.size()[1:], alpha, alpha_bar, sigma, model)
    x = 1 - x.clamp(min = 0, max = 255) / 255
    torchvision.utils.save_image(x, f'diffusion_{args.data}.png', nrow = 16, pad_value = 0.8)

######################################################################