Add redshift.py: render the maps computed by randomly initialized MLPs for several activations and weight scales.
author François Fleuret <francois@fleuret.org>
Thu, 13 Jun 2024 17:59:09 +0000 (19:59 +0200)
committer François Fleuret <francois@fleuret.org>
Thu, 13 Jun 2024 17:59:09 +0000 (19:59 +0200)
redshift.py [new file with mode: 0755]

diff --git a/redshift.py b/redshift.py
new file mode 100755 (executable)
index 0000000..b3507ed
--- /dev/null
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+import math
+
+import torch, torchvision
+
+from torch import nn
+from torch.nn import functional as F
+
+torch.set_default_dtype(torch.float64)
+
+res = 256
+nh = 100
+
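+# A res x res grid of (x, y) points covering [-1, 1]^2, flattened to shape (res * res, 2)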
+input = torch.cat(
+    [
+        torch.linspace(-1, 1, res)[None, :, None].expand(res, res, 1),
+        torch.linspace(-1, 1, res)[:, None, None].expand(res, res, 1),
+    ],
+    dim=-1,
+).reshape(-1, 2)
+
+
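+# Activation that hard-clamps its input to [-0.5, 0.5]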
+class Angles(nn.Module):
+    def forward(self, x):
+        return x.clamp(min=-0.5, max=0.5)
+
+
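+# For each activation and weight scale, render the map computed over the grid by a randomly initialized MLP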
+for activation in [nn.ReLU, nn.Tanh, nn.Softplus, Angles]:
+    for s in [1.0, 10.0]:
+        layers = [nn.Linear(2, nh), activation()]
+        nb_hidden = 4
+        for k in range(nb_hidden):
+            layers += [nn.Linear(nh, nh), activation()]
+        layers += [nn.Linear(nh, 2)]
+        model = nn.Sequential(*layers)
+
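+        # Rescale the randomly initialized parameters by s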
+        with torch.no_grad():
+            for p in model.parameters():
+                p *= s
+
+        output = model(input)
+
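+        # Scalar map over the grid: difference of the two output channels, as a 1 x 1 x res x res image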
+        img = (output[:, 1] - output[:, 0]).reshape(1, 1, res, res)
+
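+        # Standardize the map and clip it to [-1, 1]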
+        img = (img - img.mean()) / img.std()
+
+        img = img.clamp(min=-1, max=1)
+
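+        # Map [-1, 1] to a blue-white-red RGB image (negative values in blue, positive in red)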
+        img = torch.cat(
+            [
+                (1 + img).clamp(max=1),
+                (1 - img.abs()).clamp(min=0),
+                (1 - img).clamp(max=1),
+            ],
+            dim=1,
+        )
+
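+        # Tag used in the output file name for each activation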
+        name_activation = {
+            nn.ReLU: "relu",
+            nn.Tanh: "tanh",
+            nn.Softplus: "softplus",
+            Angles: "angles",
+        }[activation]
+
+        torchvision.utils.save_image(img, f"result-{name_activation}-{s}.png")