From 150a02f9e495036849d0093e7371c3cf26ed6c63 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Fran=C3=A7ois=20Fleuret?=
Date: Thu, 13 Jun 2024 19:59:09 +0200
Subject: [PATCH] Update.

---
 redshift.py | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)
 create mode 100755 redshift.py

diff --git a/redshift.py b/redshift.py
new file mode 100755
index 0000000..b3507ed
--- /dev/null
+++ b/redshift.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+import math
+
+import torch, torchvision
+
+from torch import nn
+from torch.nn import functional as F
+
+torch.set_default_dtype(torch.float64)
+
+res = 256
+nh = 100
+
+input = torch.cat(
+    [
+        torch.linspace(-1, 1, res)[None, :, None].expand(res, res, 1),
+        torch.linspace(-1, 1, res)[:, None, None].expand(res, res, 1),
+    ],
+    dim=-1,
+).reshape(-1, 2)
+
+
+class Angles(nn.Module):
+    def forward(self, x):
+        return x.clamp(min=-0.5, max=0.5)
+
+
+for activation in [nn.ReLU, nn.Tanh, nn.Softplus, Angles]:
+    for s in [1.0, 10.0]:
+        layers = [nn.Linear(2, nh), activation()]
+        nb_hidden = 4
+        for k in range(nb_hidden):
+            layers += [nn.Linear(nh, nh), activation()]
+        layers += [nn.Linear(nh, 2)]
+        model = nn.Sequential(*layers)
+
+        with torch.no_grad():
+            for p in model.parameters():
+                p *= s
+
+        output = model(input)
+
+        img = (output[:, 1] - output[:, 0]).reshape(1, 1, res, res)
+
+        img = (img - img.mean()) / (1 * img.std())
+
+        img = img.clamp(min=-1, max=1)
+
+        img = torch.cat(
+            [
+                (1 + img).clamp(max=1),
+                (1 - img.abs()).clamp(min=0),
+                (1 - img).clamp(max=1),
+            ],
+            dim=1,
+        )
+
+        name_activation = {
+            nn.ReLU: "relu",
+            nn.Tanh: "tanh",
+            nn.Softplus: "softplus",
+            Angles: "angles",
+        }[activation]
+
+        torchvision.utils.save_image(img, f"result-{name_activation}-{s}.png")
-- 
2.39.5
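
Editor's note, not part of the patch: a minimal sketch for viewing the eight
outputs side by side. It assumes only the file names produced by the
save_image call above, i.e. result-{relu,tanh,softplus,angles}-{1.0,10.0}.png;
the row layout and the output name result-grid.png are arbitrary choices made
for this sketch.

import torch, torchvision

activations = ["relu", "tanh", "softplus", "angles"]
scales = [1.0, 10.0]

# Load the PNGs written by redshift.py and rescale them to [0, 1] floats.
images = [
    torchvision.io.read_image(f"result-{a}-{s}.png").float() / 255
    for a in activations
    for s in scales
]

# One row per activation, the two weight scales next to each other.
grid = torchvision.utils.make_grid(torch.stack(images), nrow=len(scales))
torchvision.utils.save_image(grid, "result-grid.png")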