Initial commit
diff --git a/warp.py b/warp.py
new file mode 100755 (executable)
index 0000000..96dfa11
--- /dev/null
+++ b/warp.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python3
+
+
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+# Written by Francois Fleuret <francois@fleuret.org>
+
+import math, argparse, os
+
+import torch, torchvision
+
+from torch import nn
+from torch.nn import functional as F
+
+######################################################################
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument("--result_dir", type=str, default="/tmp")
+
+args = parser.parse_args()
+
+######################################################################
+
+# If the source is older than the result, do nothing
+
+ref_filename = os.path.join(args.result_dir, "warp_0.tex")
+
+if os.path.exists(ref_filename) and os.path.getmtime(__file__) < os.path.getmtime(
+    ref_filename
+):
+    exit(0)
+
+######################################################################
+
+torch.manual_seed(0)
+
+# Build a toy two-class dataset of nb points on two interleaved arcs
+nb = 1000
+
+# Sample (angle, radius) pairs, with angles spanning 1.5 * pi starting
+# at -pi / 4, and radii in [0.25, 0.35]
+x = torch.rand(nb, 2) * torch.tensor([math.pi * 1.5, 0.10]) + torch.tensor(
+    [math.pi * -0.25, 0.25]
+)
+
+# Assign random classes, map the (angle, radius) pairs to Cartesian
+# coordinates, then mirror and shift each point according to its class
+# so that the two arcs interleave
+train_targets = (torch.rand(nb) < 0.5).long()
+train_input = torch.cat((x[:, 0:1].sin() * x[:, 1:2], x[:, 0:1].cos() * x[:, 1:2]), 1)
+train_input[:, 0] *= train_targets * 2 - 1
+train_input[:, 0] += 0.05 * (train_targets * 2 - 1)
+train_input[:, 1] -= 0.15 * (train_targets * 2 - 1)
+train_input *= 1.2
+
+
+# Wrapper computing the convex combination 0.5 * x + 0.5 * f(x) of the
+# input and the wrapped module's output; defined but not used in the
+# model below
+class WithResidual(nn.Module):
+    def __init__(self, *f):
+        super().__init__()
+        self.f = f[0] if len(f) == 1 else nn.Sequential(*f)
+
+    def forward(self, x):
+        return 0.5 * x + 0.5 * self.f(x)
+
+
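+# (For instance, a residual block would be created as
+# WithResidual(nn.Linear(2, 2, bias=False), nn.Tanh()).)
+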
+# A stack of eight 2d -> 2d tanh blocks followed by a 2d -> 2d linear
+# readout; keeping every intermediate representation in the plane makes
+# each layer a plottable warping of R^2
+model = nn.Sequential(
+    *[nn.Sequential(nn.Linear(2, 2, bias=False), nn.Tanh()) for _ in range(8)],
+    nn.Linear(2, 2),
+)
+
+# Initialize every linear layer to twice the identity plus a small
+# perturbation, so the initial model is a near-identity (up to scaling
+# and the tanhs) map of the plane
+with torch.no_grad():
+    for p in model.modules():
+        if isinstance(p, nn.Linear):
+            p.weight[...] = 2 * torch.eye(2) + torch.randn(2, 2) * 1e-4
+
+optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
+criterion = nn.CrossEntropyLoss()
+
+nb_epochs, batch_size = 1000, 25
+
+for k in range(nb_epochs):
+    acc_loss = 0.0
+
+    for input, targets in zip(
+        train_input.split(batch_size), train_targets.split(batch_size)
+    ):
+        output = model(input)
+        loss = criterion(output, targets)
+        acc_loss += loss.item()
+
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+
+    # Compute the training error rate
+    nb_train_errors = 0
+    with torch.no_grad():
+        for input, targets in zip(
+            train_input.split(batch_size), train_targets.split(batch_size)
+        ):
+            wta = model(input).argmax(1)
+            nb_train_errors += (wta != targets).long().sum().item()
+    train_error = nb_train_errors / train_input.size(0)
+
+    print(f"epoch {k} loss {acc_loss:.2f} train_error {train_error*100:.2f}%")
+
+    if train_error == 0:
+        break
+
+######################################################################
+
+sg = 25
+
+input, targets = train_input, train_targets
+
+# A regular sg x sg grid covering [-1.2, 1.2]^2, flattened into a
+# (sg * sg, 2) tensor of points
+grid = torch.linspace(-1.2, 1.2, sg)
+grid = torch.cat(
+    (grid[:, None, None].expand(sg, sg, 1), grid[None, :, None].expand(sg, sg, 1)), -1
+).reshape(-1, 2)
+
+for l, m in enumerate(model):
+    with open(os.path.join(args.result_dir, f"warp_{l}.tex"), "w") as f:
+        f.write(
+            """\\addplot[
+    scatter src=explicit symbolic,
+    scatter/classes={0={blue}, 1={red}},
+    scatter, mark=*, only marks, mark options={mark size=0.5},
+]%
+table[meta=label] {
+x y label
+"""
+        )
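+        # Scatter plot of the first 512 training points, with the class
+        # as the color label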
+        for k in range(512):
+            f.write(f"{input[k,0]} {input[k,1]} {targets[k]}\n")
+        f.write("};\n")
+
+        # Draw the deformed grid: one polyline per grid row, then one
+        # per grid column
+        g = grid.reshape(sg, sg, -1)
+        for i in range(g.size(0)):
+            for j in range(g.size(1)):
+                pre = "\\draw[black!25,very thin] " if j == 0 else "--"
+                f.write(f"{pre} ({g[i,j,0]},{g[i,j,1]})")
+            f.write(";\n")
+
+        for j in range(g.size(1)):
+            for i in range(g.size(0)):
+                pre = "\\draw[black!25,very thin] " if i == 0 else "--"
+                f.write(f"{pre} ({g[i,j,0]},{g[i,j,1]})")
+            f.write(";\n")
+
+        # On the last iteration, add the decision line of the readout:
+        # the two logits are equal where a . x + b = 0, with
+        # a = w_0 - w_1 and b = b_0 - b_1
+
+        if l == len(model) - 1:
+            u = torch.tensor([[1.0, -1.0]])
+            phi = model[-1]
+            a, b = (u @ phi.weight).squeeze(), (u @ phi.bias).item()
+            # p is the foot of the perpendicular from the origin to the
+            # line, i.e. a . p + b = 0
+            p = -a * (b / (a @ a).item())
+            f.write(
+                f"\\draw[black,thick] ({p[0]-a[1]},{p[1]+a[0]}) -- ({p[0]+a[1]},{p[1]-a[0]});"
+            )
+
+    # Push both the training points and the grid through layer l, so
+    # the next file shows the representation after this layer
+    input, grid = m(input), m(grid)
+
+######################################################################
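+
+# A minimal sketch of the assumed usage (not produced by this script):
+# each generated warp_<l>.tex can be \input inside a pgfplots axis,
+# e.g.
+#
+#   \begin{tikzpicture}
+#     \begin{axis}[xmin=-1.2, xmax=1.2, ymin=-1.2, ymax=1.2]
+#       \input{warp_0.tex}
+#     \end{axis}
+#   \end{tikzpicture}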