From ffeb45668535e2c338feb5dfb50c4d0ab22ae2a9 Mon Sep 17 00:00:00 2001
From: François Fleuret
Date: Tue, 26 Mar 2024 19:55:44 +0100
Subject: [PATCH 1/3] Update.

---
 bit_mlp.py | 49 ++++++++++++++++++++++++++++---------------------
 1 file changed, 28 insertions(+), 21 deletions(-)

diff --git a/bit_mlp.py b/bit_mlp.py
index 90409f2..6f7f92e 100755
--- a/bit_mlp.py
+++ b/bit_mlp.py
@@ -116,33 +116,40 @@ for linear_layer in errors.keys():
 
     ######################################################################
 
-    errors[linear_layer].append((nb_hidden, test_error))
+    errors[linear_layer].append(
+        (nb_hidden, test_error * 100, acc_train_loss / train_input.size(0))
+    )
 
 import matplotlib.pyplot as plt
 
-fig = plt.figure()
-fig.set_figheight(6)
-fig.set_figwidth(8)
 
-ax = fig.add_subplot(1, 1, 1)
+def save_fig(filename, ymax, ylabel, index):
+    fig = plt.figure()
+    fig.set_figheight(6)
+    fig.set_figwidth(8)
 
-ax.set_ylim(0, 1)
-ax.spines.right.set_visible(False)
-ax.spines.top.set_visible(False)
-ax.set_xscale("log")
-ax.set_xlabel("Nb hidden units")
-ax.set_ylabel("Test error (%)")
+    ax = fig.add_subplot(1, 1, 1)
 
-X = torch.tensor([x[0] for x in errors[nn.Linear]])
-Y = torch.tensor([x[1] for x in errors[nn.Linear]])
-ax.plot(X, Y, color="gray", label="nn.Linear")
+    ax.set_ylim(0, ymax)
+    ax.spines.right.set_visible(False)
+    ax.spines.top.set_visible(False)
+    ax.set_xscale("log")
+    ax.set_xlabel("Nb hidden units")
+    ax.set_ylabel(ylabel)
 
-X = torch.tensor([x[0] for x in errors[QLinear]])
-Y = torch.tensor([x[1] for x in errors[QLinear]])
-ax.plot(X, Y, color="red", label="QLinear")
+    X = torch.tensor([x[0] for x in errors[nn.Linear]])
+    Y = torch.tensor([x[index] for x in errors[nn.Linear]])
+    ax.plot(X, Y, color="gray", label="nn.Linear")
 
-ax.legend(frameon=False, loc=1)
+    X = torch.tensor([x[0] for x in errors[QLinear]])
+    Y = torch.tensor([x[index] for x in errors[QLinear]])
+    ax.plot(X, Y, color="red", label="QLinear")
 
-filename = f"bit_mlp.pdf"
-print(f"saving {filename}")
-fig.savefig(filename, bbox_inches="tight")
+    ax.legend(frameon=False, loc=1)
+
+    print(f"saving {filename}")
+    fig.savefig(filename, bbox_inches="tight")
+
+
+save_fig("bit_mlp_err.pdf", ymax=15, ylabel="Test error (%)", index=1)
+save_fig("bit_mlp_loss.pdf", ymax=1.25, ylabel="Train loss", index=2)
-- 
2.20.1
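
The plotting code above compares nn.Linear against QLinear, whose definition lives earlier in bit_mlp.py and does not appear in this hunk. As a rough illustration of the kind of layer being benchmarked, here is a minimal sketch assuming a BitNet-style ternary weight quantization with a straight-through estimator; the class name QLinearSketch and every detail below are assumptions for illustration, not the actual QLinear implementation:

    import torch
    from torch import nn
    from torch.nn import functional as F

    class QLinearSketch(nn.Module):
        # Hypothetical stand-in for QLinear: on the forward pass the weights
        # are quantized to {-1, 0, +1} times a per-tensor scale, while the
        # straight-through estimator lets gradients reach the full-precision
        # weights unchanged.
        def __init__(self, dim_in, dim_out):
            super().__init__()
            self.weight = nn.Parameter(torch.randn(dim_out, dim_in) / dim_in**0.5)
            self.bias = nn.Parameter(torch.zeros(dim_out))

        def forward(self, x):
            w = self.weight
            scale = w.abs().mean().clamp(min=1e-8)
            w_q = (w / scale).round().clamp(min=-1, max=1) * scale
            w_q = w + (w_q - w).detach()  # forward uses w_q, backward sees w
            return F.linear(x, w_q, self.bias)

With such a layer, the curves written to bit_mlp_err.pdf and bit_mlp_loss.pdf make it possible to compare the two layer types as the hidden width grows.
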
From 150a02f9e495036849d0093e7371c3cf26ed6c63 Mon Sep 17 00:00:00 2001
From: François Fleuret
Date: Thu, 13 Jun 2024 19:59:09 +0200
Subject: [PATCH 2/3] Update.

---
 redshift.py | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)
 create mode 100755 redshift.py

diff --git a/redshift.py b/redshift.py
new file mode 100755
index 0000000..b3507ed
--- /dev/null
+++ b/redshift.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+import math
+
+import torch, torchvision
+
+from torch import nn
+from torch.nn import functional as F
+
+torch.set_default_dtype(torch.float64)
+
+res = 256
+nh = 100
+
+input = torch.cat(
+    [
+        torch.linspace(-1, 1, res)[None, :, None].expand(res, res, 1),
+        torch.linspace(-1, 1, res)[:, None, None].expand(res, res, 1),
+    ],
+    dim=-1,
+).reshape(-1, 2)
+
+
+class Angles(nn.Module):
+    def forward(self, x):
+        return x.clamp(min=-0.5, max=0.5)
+
+
+for activation in [nn.ReLU, nn.Tanh, nn.Softplus, Angles]:
+    for s in [1.0, 10.0]:
+        layers = [nn.Linear(2, nh), activation()]
+        nb_hidden = 4
+        for k in range(nb_hidden):
+            layers += [nn.Linear(nh, nh), activation()]
+        layers += [nn.Linear(nh, 2)]
+        model = nn.Sequential(*layers)
+
+        with torch.no_grad():
+            for p in model.parameters():
+                p *= s
+
+        output = model(input)
+
+        img = (output[:, 1] - output[:, 0]).reshape(1, 1, res, res)
+
+        img = (img - img.mean()) / (1 * img.std())
+
+        img = img.clamp(min=-1, max=1)
+
+        img = torch.cat(
+            [
+                (1 + img).clamp(max=1),
+                (1 - img.abs()).clamp(min=0),
+                (1 - img).clamp(max=1),
+            ],
+            dim=1,
+        )
+
+        name_activation = {
+            nn.ReLU: "relu",
+            nn.Tanh: "tanh",
+            nn.Softplus: "softplus",
+            Angles: "angles",
+        }[activation]
+
+        torchvision.utils.save_image(img, f"result-{name_activation}-{s}.png")
-- 
2.20.1

From 3afcea624963ad2d381c19a7d54bb26e218c5bce Mon Sep 17 00:00:00 2001
From: François Fleuret
Date: Thu, 13 Jun 2024 20:05:29 +0200
Subject: [PATCH 3/3] Update.

---
 redshift.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/redshift.py b/redshift.py
index b3507ed..2ed1e52 100755
--- a/redshift.py
+++ b/redshift.py
@@ -9,8 +9,10 @@ from torch.nn import functional as F
 
 torch.set_default_dtype(torch.float64)
 
+nb_hidden = 5
+hidden_dim = 100
+
 res = 256
-nh = 100
 
 input = torch.cat(
     [
@@ -28,11 +30,10 @@ class Angles(nn.Module):
 
 for activation in [nn.ReLU, nn.Tanh, nn.Softplus, Angles]:
     for s in [1.0, 10.0]:
-        layers = [nn.Linear(2, nh), activation()]
-        nb_hidden = 4
-        for k in range(nb_hidden):
-            layers += [nn.Linear(nh, nh), activation()]
-        layers += [nn.Linear(nh, 2)]
+        layers = [nn.Linear(2, hidden_dim), activation()]
+        for k in range(nb_hidden - 1):
+            layers += [nn.Linear(hidden_dim, hidden_dim), activation()]
+        layers += [nn.Linear(hidden_dim, 2)]
         model = nn.Sequential(*layers)
 
         with torch.no_grad():
-- 
2.20.1
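
The redshift.py script added in PATCH 2/3 renders, for each activation and weight scale s, the difference between the two output channels of a randomly initialized MLP evaluated on a 256x256 grid over [-1, 1]^2; the field is normalized, clamped to [-1, 1], and mapped to a white-centered diverging colormap (negative values blue, positive values red). Note that PATCH 3/3 leaves the architecture unchanged: nb_hidden previously counted the 4 hidden layers added after the first one, and now counts all 5 hidden layers.

The coordinate grid could equivalently be built with torch.meshgrid, which some readers may find easier to follow than the cat/expand construction — a sketch, assuming PyTorch >= 1.10 for the explicit indexing argument (the variable is named grid here rather than input as in the script, to avoid shadowing the builtin):

    import torch

    res = 256

    # Same grid as in redshift.py: the column index drives the first input
    # coordinate (x), the row index drives the second (y).
    ys, xs = torch.meshgrid(
        torch.linspace(-1, 1, res),
        torch.linspace(-1, 1, res),
        indexing="ij",
    )
    grid = torch.stack([xs, ys], dim=-1).reshape(-1, 2)  # (res * res, 2)
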