# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>
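
# This script trains a small stack of 2d-to-2d layers on a toy two-class
# dataset and writes one TikZ/pgfplots fragment per module (warp_<l>.tex in
# --result_dir), showing the training points and a regular grid as seen at
# the input of that module, i.e. how each layer progressively warps the plane.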

import math, argparse, os

import torch, torchvision

from torch import nn
from torch.nn import functional as F

######################################################################

parser = argparse.ArgumentParser()

parser.add_argument("--result_dir", type=str, default="/tmp")

args = parser.parse_args()

######################################################################

# If the source is older than the result, do nothing

ref_filename = os.path.join(args.result_dir, "warp_0.tex")

if os.path.exists(ref_filename) and os.path.getmtime(__file__) < os.path.getmtime(
    ref_filename
):
    exit(0)

######################################################################

nb = 1000  # number of training points; the value is assumed, it is not in this excerpt

# Sample (angle, radius) pairs on a thin annular arc
x = torch.rand(nb, 2) * torch.tensor([math.pi * 1.5, 0.10]) + torch.tensor(
    [math.pi * -0.25, 0.25]
)

train_targets = (torch.rand(nb) < 0.5).long()
train_input = torch.cat((x[:, 0:1].sin() * x[:, 1:2], x[:, 0:1].cos() * x[:, 1:2]), 1)
train_input[:, 0] *= train_targets * 2 - 1
train_input[:, 0] += 0.05 * (train_targets * 2 - 1)
train_input[:, 1] -= 0.15 * (train_targets * 2 - 1)
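
# At this point train_input holds nb points in 2d forming two thin, mirrored
# circular arcs, one per class, slightly offset from each other; train_targets
# holds the corresponding 0/1 labels.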

class WithResidual(nn.Module):
    def __init__(self, *f):
        super().__init__()
        self.f = f[0] if len(f) == 1 else nn.Sequential(*f)

    def forward(self, x):
        return 0.5 * x + 0.5 * self.f(x)
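
# WithResidual returns the average of its input and of the wrapped module's
# output, a residual-style connection that keeps the map close to its input.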

model = nn.Sequential(
    nn.Sequential(nn.Linear(2, 2, bias=False), nn.Tanh()),
    nn.Sequential(nn.Linear(2, 2, bias=False), nn.Tanh()),
    nn.Sequential(nn.Linear(2, 2, bias=False), nn.Tanh()),
    nn.Sequential(nn.Linear(2, 2, bias=False), nn.Tanh()),
    nn.Sequential(nn.Linear(2, 2, bias=False), nn.Tanh()),
    nn.Sequential(nn.Linear(2, 2, bias=False), nn.Tanh()),
    nn.Sequential(nn.Linear(2, 2, bias=False), nn.Tanh()),
    nn.Sequential(nn.Linear(2, 2, bias=False), nn.Tanh()),
    # Final linear classifier head. This module is assumed: it is not visible
    # in the excerpt, but the decision-line code below relies on a last layer
    # with a bias term.
    nn.Linear(2, 2),
)
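
# Each 2-to-2 linear+tanh block is a smooth warp of the plane; stacking eight
# of them lets the network progressively untangle the two arcs before the
# final linear layer separates the classes.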

with torch.no_grad():
    for p in model.modules():
        if isinstance(p, nn.Linear):
            p.weight[...] = 2 * torch.eye(2) + torch.randn(2, 2) * 1e-4
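
# Every linear layer's weight starts as twice the identity plus tiny noise, so
# the stacked maps begin as near-identical mild expansions (squashed by the
# tanh) rather than as random mixtures of the two coordinates.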

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()

nb_epochs, batch_size = 1000, 25

for k in range(nb_epochs):
    acc_loss = 0.0

    for input, targets in zip(
        train_input.split(batch_size), train_targets.split(batch_size)
    ):
        output = model(input)
        loss = criterion(output, targets)
        acc_loss += loss.item()

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
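
    # After the parameter updates, measure the training error rate over the
    # full training set for this epoch.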
    nb_train_errors = 0
    with torch.no_grad():
        for input, targets in zip(
            train_input.split(batch_size), train_targets.split(batch_size)
        ):
            wta = model(input).argmax(1)
            nb_train_errors += (wta != targets).long().sum()
    train_error = nb_train_errors / train_input.size(0)

    print(f"loss {k} {acc_loss:.02f} {train_error*100:.02f}%")

######################################################################

sg = 25  # grid resolution per axis; the value is assumed, it is not in this excerpt

input, targets = train_input, train_targets

grid = torch.linspace(-1.2, 1.2, sg)
grid = torch.cat(
    (grid[:, None, None].expand(sg, sg, 1), grid[None, :, None].expand(sg, sg, 1)), -1
).reshape(-1, 2)
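
# grid now holds the sg x sg nodes of a regular lattice over [-1.2, 1.2]^2,
# flattened to a (sg*sg, 2) tensor; it is pushed through the layers along
# with the training points to visualize the warping.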

for l, m in enumerate(model):
    with open(os.path.join(args.result_dir, f"warp_{l}.tex"), "w") as f:
        # Scatter plot of the training points in the current representation;
        # the \addplot / table scaffolding around the visible option lines is
        # an assumed reconstruction.
        f.write(
            """\\addplot[
scatter src=explicit symbolic,
scatter/classes={0={blue}, 1={red}},
scatter, mark=*, only marks, mark options={mark size=0.5},
] table[meta=label] {
x y label
"""
        )
        for k in range(input.size(0)):
            f.write(f"{input[k,0]} {input[k,1]} {targets[k]}\n")
        f.write("};\n")
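
        # Draw the current image of the regular lattice as thin gray polylines,
        # one pass along each row and one along each column, to show how the
        # plane has been warped up to this layer.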
        g = grid.reshape(sg, sg, -1)
        for i in range(g.size(0)):
            for j in range(g.size(1)):
                if j == 0:
                    pre = "\\draw[black!25,very thin] "
                else:
                    pre = "--"
                f.write(f"{pre} ({g[i,j,0]},{g[i,j,1]})")
            f.write(";\n")

        for j in range(g.size(1)):
            for i in range(g.size(0)):
                if i == 0:
                    pre = "\\draw[black!25,very thin] "
                else:
                    pre = "--"
                f.write(f"{pre} ({g[i,j,0]},{g[i,j,1]})")
            f.write(";\n")

        # Add the decision line of the final classifier
        if l == len(model) - 1:
            u = torch.tensor([[1.0, -1.0]])
            phi = model[-1]  # assumed: the final linear classifier head
            a, b = (u @ phi.weight).squeeze(), (u @ phi.bias).item()
            p = a * (b / (a @ a.t()).item())
            f.write(
                f"\\draw[black,thick] ({p[0]-a[1]},{p[1]+a[0]}) -- ({p[0]+a[1]},{p[1]-a[0]});"
            )

    # Push both the training points and the grid through the current module,
    # so that the next file shows the representation after this layer.
    input, grid = m(input), m(grid)

######################################################################
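
# The warp_<l>.tex files are pgfplots fragments (an \addplot plus raw \draw
# commands), not standalone documents; they are meant to be \input inside an
# axis environment. A minimal (assumed) wrapper, relying on a pgfplots compat
# level recent enough that plain coordinates are read as axis coordinates:
#
#   \begin{tikzpicture}
#     \begin{axis}[xmin=-1.2, xmax=1.2, ymin=-1.2, ymax=1.2]
#       \input{warp_0.tex}
#     \end{axis}
#   \end{tikzpicture}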