X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=minidiffusion.py;h=879b7964825e2a89b20a06c9228311d448f3f380;hb=560b7d51f52c7328e9d87ce717dacc4da7977de7;hp=2c54d196062ee1775385335d67c03ba29a34b3ca;hpb=a3c7617d0b5770edf6030502e4eac477a7218820;p=pytorch.git

diff --git a/minidiffusion.py b/minidiffusion.py
index 2c54d19..879b796 100755
--- a/minidiffusion.py
+++ b/minidiffusion.py
@@ -14,14 +14,20 @@ from torch import nn
 
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
+print(f'device {device}')
+
 ######################################################################
 
 def sample_gaussian_mixture(nb):
     p, std = 0.3, 0.2
-    result = torch.empty(nb, 1).normal_(0, std)
+    result = torch.randn(nb, 1) * std
     result = result + torch.sign(torch.rand(result.size()) - p) / 2
     return result
 
+def sample_ramp(nb):
+    result = torch.min(torch.rand(nb, 1), torch.rand(nb, 1))
+    return result
+
 def sample_two_discs(nb):
     a = torch.rand(nb) * math.pi * 2
     b = torch.rand(nb).sqrt()
@@ -35,8 +41,9 @@ def sample_two_discs(nb):
 def sample_disc_grid(nb):
     a = torch.rand(nb) * math.pi * 2
     b = torch.rand(nb).sqrt()
-    q = torch.randint(5, (nb,)) / 2.5 - 2 / 2.5
-    r = torch.randint(5, (nb,)) / 2.5 - 2 / 2.5
+    N = 4
+    q = (torch.randint(N, (nb,)) - (N - 1) / 2) / ((N - 1) / 2)
+    r = (torch.randint(N, (nb,)) - (N - 1) / 2) / ((N - 1) / 2)
     b = b * 0.1
     result = torch.empty(nb, 2)
     result[:, 0] = a.cos() * b + q
@@ -59,6 +66,7 @@ def sample_mnist(nb):
 
 samplers = {
     'gaussian_mixture': sample_gaussian_mixture,
+    'ramp': sample_ramp,
     'two_discs': sample_two_discs,
     'disc_grid': sample_disc_grid,
     'spiral': sample_spiral,
@@ -179,7 +187,7 @@ train_mean, train_std = train_input.mean(), train_input.std()
 # Model
 
 if train_input.dim() == 2:
-    nh = 64
+    nh = 256
 
     model = nn.Sequential(
         nn.Linear(train_input.size(1) + 1, nh),
@@ -197,6 +205,26 @@ elif train_input.dim() == 4:
 
 model.to(device)
 
+print(f'nb_parameters {sum([ p.numel() for p in model.parameters() ])}')
+
+######################################################################
+# Generate
+
+def generate(size, alpha, alpha_bar, sigma, model):
+    with torch.no_grad():
+        x = torch.randn(size, device = device)
+
+        for t in range(T-1, -1, -1):
+            z = torch.zeros_like(x) if t == 0 else torch.randn_like(x)
+            input = torch.cat((x, torch.full_like(x[:,:1], t / (T - 1) - 0.5)), 1)
+            x = 1/torch.sqrt(alpha[t]) \
+                * (x - (1-alpha[t]) / torch.sqrt(1-alpha_bar[t]) * model(input)) \
+                + sigma[t] * z
+
+        x = x * train_std + train_mean
+
+    return x
+
 ######################################################################
 # Train
 
@@ -228,42 +256,27 @@ for k in range(args.nb_epochs):
 
         ema.step()
 
-    if k%10 == 0: print(f'{k} {acc_loss / train_input.size(0)}')
+    print(f'{k} {acc_loss / train_input.size(0)}')
 
 ema.copy()
 
-######################################################################
-# Generate
-
-def generate(size, model):
-    with torch.no_grad():
-        x = torch.randn(size, device = device)
-
-        for t in range(T-1, -1, -1):
-            z = torch.zeros_like(x) if t == 0 else torch.randn_like(x)
-            input = torch.cat((x, torch.full_like(x[:,:1], t / (T - 1) - 0.5)), 1)
-            x = 1/torch.sqrt(alpha[t]) \
-                * (x - (1-alpha[t]) / torch.sqrt(1-alpha_bar[t]) * model(input)) \
-                + sigma[t] * z
-
-        x = x * train_std + train_mean
-
-    return x
-
 ######################################################################
 # Plot
 
 model.eval()
 
 if train_input.dim() == 2:
+
     fig = plt.figure()
     ax = fig.add_subplot(1, 1, 1)
 
     if train_input.size(1) == 1:
 
-        x = generate((10000, 1), model)
+        x = generate((10000, 1), alpha, alpha_bar, sigma, model)
 
         ax.set_xlim(-1.25, 1.25)
+        ax.spines.right.set_visible(False)
+        ax.spines.top.set_visible(False)
 
         d = train_input.flatten().detach().to('cpu').numpy()
         ax.hist(d, 25, (-1, 1),
@@ -279,19 +292,21 @@ if train_input.dim() == 2:
 
     elif train_input.size(1) == 2:
 
-        x = generate((1000, 2), model)
+        x = generate((1000, 2), alpha, alpha_bar, sigma, model)
 
-        ax.set_xlim(-1.25, 1.25)
-        ax.set_ylim(-1.25, 1.25)
+        ax.set_xlim(-1.5, 1.5)
+        ax.set_ylim(-1.5, 1.5)
         ax.set(aspect = 1)
+        ax.spines.right.set_visible(False)
+        ax.spines.top.set_visible(False)
 
-        d = train_input[:x.size(0)].detach().to('cpu').numpy()
+        d = x.detach().to('cpu').numpy()
         ax.scatter(d[:, 0], d[:, 1],
-                   color = 'lightblue', label = 'Train')
+                   s = 2.0, color = 'red', label = 'Synthesis')
 
-        d = x.detach().to('cpu').numpy()
+        d = train_input[:x.size(0)].detach().to('cpu').numpy()
         ax.scatter(d[:, 0], d[:, 1],
-                   facecolors = 'none', color = 'red', label = 'Synthesis')
+                   s = 2.0, color = 'gray', label = 'Train')
 
         ax.legend(frameon = False, loc = 2)
 
@@ -304,7 +319,8 @@ if train_input.dim() == 2:
 
     plt.show()
 
 elif train_input.dim() == 4:
-    x = generate((128,) + train_input.size()[1:], model)
+
+    x = generate((128,) + train_input.size()[1:], alpha, alpha_bar, sigma, model)
     x = 1 - x.clamp(min = 0, max = 255) / 255
     torchvision.utils.save_image(x, f'diffusion_{args.data}.png', nrow = 16, pad_value = 0.8)
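Note (not part of the patch): the refactored generate() now takes the noise-schedule tensors alpha, alpha_bar and sigma as explicit arguments, but their definition lies outside the hunks shown above. The sketch below assumes the standard DDPM linear-beta schedule; the values T = 1000 and beta in [1e-4, 0.02] are assumptions for illustration, not taken from this diff.

import torch

T = 1000                              # number of diffusion steps (assumed)
beta = torch.linspace(1e-4, 0.02, T)  # linear beta schedule (assumed values)
alpha = 1 - beta                      # alpha_t = 1 - beta_t
alpha_bar = alpha.cumprod(0)          # cumulative product bar(alpha)_t
sigma = beta.sqrt()                   # noise scale of the reverse step

# With model, train_mean and train_std defined as in minidiffusion.py,
# sampling with the new signature then reads, for instance:
#   x = generate((1000, 2), alpha, alpha_bar, sigma, model)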