# @XREMOTE_HOST: elk.fleuret.org
# @XREMOTE_EXEC: python
# @XREMOTE_PRE: source ${HOME}/misc/venv/pytorch/bin/activate
# @XREMOTE_PRE: killall -u ${USER} -q -9 python || true
# @XREMOTE_PRE: ln -sf ${HOME}/data/pytorch ./data
# @XREMOTE_SEND: *.py *.sh

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, time

import torch, torchvision

from torch import nn
from torch.nn import functional as F

######################################################################

nb_quantization_levels = 101

def quantize(x, xmin, xmax):
    # Map values in [xmin, xmax] to integer bins in [0, nb_quantization_levels - 1]
    return (
        ((x - xmin) / (xmax - xmin) * nb_quantization_levels)
        .long()
        .clamp(min=0, max=nb_quantization_levels - 1)
    )


def dequantize(q, xmin, xmax):
    return q / nb_quantization_levels * (xmax - xmin) + xmin
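

# A minimal sanity check of the quantization round trip (an illustrative
# sketch, not part of the original pipeline; the helper name is made up): any
# value in [xmin, xmax] is recovered up to one bin width after
# quantize / dequantize.


def check_quantization_round_trip(xmin=-1.0, xmax=1.0):
    x = torch.linspace(xmin, xmax, 1000)
    x_hat = dequantize(quantize(x, xmin, xmax), xmin, xmax)
    bin_width = (xmax - xmin) / nb_quantization_levels
    # Small slack to absorb floating-point rounding
    assert (x - x_hat).abs().max().item() <= bin_width + 1e-6
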
######################################################################

def generate_sets_and_params(
    batch_nb_mlps,
    nb_samples,
    batch_size,
    nb_epochs,
    device=torch.device("cpu"),
    save_as_examples=False,
):
    data_input = torch.zeros(batch_nb_mlps, 2 * nb_samples, 2, device=device)
    data_targets = torch.zeros(
        batch_nb_mlps, 2 * nb_samples, dtype=torch.int64, device=device
    )
    nb_rec = 2  # number of rectangles per task (value assumed, not in the excerpt)
    nb_values = 2  # more increases the min-max gap

    rec_support = torch.empty(batch_nb_mlps, nb_rec, 4, device=device)
    while (data_targets.float().mean(-1) - 0.5).abs().max() > 0.1:
        i = (data_targets.float().mean(-1) - 0.5).abs() > 0.1
        nb = i.sum()
        support = torch.rand(nb, nb_rec, 2, nb_values, device=device) * 2 - 1
        support = support.sort(-1).values
        support = support[:, :, :, torch.tensor([0, nb_values - 1])].view(nb, nb_rec, 4)

        x = torch.rand(nb, 2 * nb_samples, 2, device=device) * 2 - 1

        # A point is labeled 1 iff it falls inside at least one of the rectangles
        y = (
            (
                (x[:, None, :, 0] >= support[:, :, None, 0]).long()
                * (x[:, None, :, 0] <= support[:, :, None, 1]).long()
                * (x[:, None, :, 1] >= support[:, :, None, 2]).long()
                * (x[:, None, :, 1] <= support[:, :, None, 3]).long()
            )
            .max(dim=1)
            .values
        )

        data_input[i], data_targets[i], rec_support[i] = x, y, support
    train_input, train_targets = (
        data_input[:, :nb_samples],
        data_targets[:, :nb_samples],
    )
    test_input, test_targets = data_input[:, nb_samples:], data_targets[:, nb_samples:]

    q_train_input = quantize(train_input, -1, 1)
    train_input = dequantize(q_train_input, -1, 1)

    q_test_input = quantize(test_input, -1, 1)
    test_input = dequantize(q_test_input, -1, 1)
    if save_as_examples:
        # A regular grid over [-1, 1]^2 at the quantization resolution
        a = (
            2
            * torch.arange(nb_quantization_levels).float()
            / (nb_quantization_levels - 1)
            - 1
        )
        xf = torch.cat(
            [
                a[:, None, None].expand(
                    nb_quantization_levels, nb_quantization_levels, 1
                ),
                a[None, :, None].expand(
                    nb_quantization_levels, nb_quantization_levels, 1
                ),
            ],
            2,
        )
        xf = xf.reshape(1, -1, 2).expand(min(q_train_input.size(0), 10), -1, -1)
        print(f"{xf.size()=} {x.size()=}")
        yf = (
            (
                (xf[:, None, :, 0] >= rec_support[: xf.size(0), :, None, 0]).long()
                * (xf[:, None, :, 0] <= rec_support[: xf.size(0), :, None, 1]).long()
                * (xf[:, None, :, 1] >= rec_support[: xf.size(0), :, None, 2]).long()
                * (xf[:, None, :, 1] <= rec_support[: xf.size(0), :, None, 3]).long()
            )
            .max(dim=1)
            .values
        )

        full_input, full_targets = xf, yf

        q_full_input = quantize(full_input, -1, 1)
        full_input = dequantize(q_full_input, -1, 1)

        for k in range(q_full_input[:10].size(0)):
            with open(f"example_full_{k:04d}.dat", "w") as f:
                for u, c in zip(full_input[k], full_targets[k]):
                    f.write(f"{c} {u[0].item()} {u[1].item()}\n")

        for k in range(q_train_input[:10].size(0)):
            with open(f"example_train_{k:04d}.dat", "w") as f:
                for u, c in zip(train_input[k], train_targets[k]):
                    f.write(f"{c} {u[0].item()} {u[1].item()}\n")
    hidden_dim = 32  # hidden-layer width (value assumed, not in the excerpt)
    w1 = torch.randn(batch_nb_mlps, hidden_dim, 2, device=device) / math.sqrt(2)
    b1 = torch.zeros(batch_nb_mlps, hidden_dim, device=device)
    w2 = torch.randn(batch_nb_mlps, 2, hidden_dim, device=device) / math.sqrt(
        hidden_dim
    )
    b2 = torch.zeros(batch_nb_mlps, 2, device=device)

    for p in [w1, b1, w2, b2]:
        p.requires_grad_()

    optimizer = torch.optim.Adam([w1, b1, w2, b2], lr=1e-2)
    criterion = nn.CrossEntropyLoss()
    for k in range(nb_epochs):
        acc_train_loss = 0.0
        nb_train_errors = 0

        for input, targets in zip(
            train_input.split(batch_size, dim=1), train_targets.split(batch_size, dim=1)
        ):
            h = torch.einsum("mij,mnj->mni", w1, input) + b1[:, None, :]
            h = F.relu(h)
            output = torch.einsum("mij,mnj->mni", w2, h) + b2[:, None, :]
            loss = F.cross_entropy(
                output.reshape(-1, output.size(-1)), targets.reshape(-1)
            )
            acc_train_loss += loss.item() * input.size(0)

            wta = output.argmax(-1)
            nb_train_errors += (wta != targets).long().sum(-1)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Progressively quantize a growing fraction of the parameters over the epochs
        with torch.no_grad():
            for p in [w1, b1, w2, b2]:
                m = (
                    torch.rand(p.size(), device=p.device) <= k / (nb_epochs - 1)
                ).long()
                pq = quantize(p, -2, 2)
                p[...] = (1 - m) * p + m * dequantize(pq, -2, 2)

        train_error = nb_train_errors / train_input.size(1)
        acc_train_loss = acc_train_loss / train_input.size(1)

        # print(f"{k=} {acc_train_loss=} {train_error=}")

    acc_test_loss = 0
    nb_test_errors = 0

    for input, targets in zip(
        test_input.split(batch_size, dim=1), test_targets.split(batch_size, dim=1)
    ):
        h = torch.einsum("mij,mnj->mni", w1, input) + b1[:, None, :]
        h = F.relu(h)
        output = torch.einsum("mij,mnj->mni", w2, h) + b2[:, None, :]
        loss = F.cross_entropy(output.reshape(-1, output.size(-1)), targets.reshape(-1))
        acc_test_loss += loss.item() * input.size(0)

        wta = output.argmax(-1)
        nb_test_errors += (wta != targets).long().sum(-1)

    test_error = nb_test_errors / test_input.size(1)

    q_params = torch.cat(
        [quantize(p.view(batch_nb_mlps, -1), -2, 2) for p in [w1, b1, w2, b2]], dim=1
    )
    q_train_set = torch.cat([q_train_input, train_targets[:, :, None]], -1).reshape(
        batch_nb_mlps, -1
    )
    q_test_set = torch.cat([q_test_input, test_targets[:, :, None]], -1).reshape(
        batch_nb_mlps, -1
    )

    return q_train_set, q_test_set, q_params, test_error

######################################################################
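
# The loops above and below run many two-layer MLPs in parallel: the parameters
# are batched along a leading "MLP index" dimension m and contracted with
# einsum. The sketch below (batched_mlp_forward is not a name from the original
# code) spells out that forward pass: "mij,mnj->mni" applies MLP m to its own
# batch of points n.


def batched_mlp_forward(w1, b1, w2, b2, input):
    # w1: (nb_mlps, hidden_dim, 2), b1: (nb_mlps, hidden_dim)
    # w2: (nb_mlps, 2, hidden_dim), b2: (nb_mlps, 2)
    # input: (nb_mlps, nb_points, 2) -> logits of shape (nb_mlps, nb_points, 2)
    h = torch.einsum("mij,mnj->mni", w1, input) + b1[:, None, :]
    h = F.relu(h)
    return torch.einsum("mij,mnj->mni", w2, h) + b2[:, None, :]


######################################################################
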
def evaluate_q_params(
    q_params,
    q_set,
    batch_size=25,
    device=torch.device("cpu"),
    nb_mlps_per_batch=1024,
    save_as_examples=False,
):
    errors = []
    nb_mlps = q_params.size(0)
    for n in range(0, nb_mlps, nb_mlps_per_batch):
        batch_nb_mlps = min(nb_mlps_per_batch, nb_mlps - n)
        batch_q_params = q_params[n : n + batch_nb_mlps]
        batch_q_set = q_set[n : n + batch_nb_mlps]
        hidden_dim = 32  # must match the value used in generate_sets_and_params

        w1 = torch.empty(batch_nb_mlps, hidden_dim, 2, device=device)
        b1 = torch.empty(batch_nb_mlps, hidden_dim, device=device)
        w2 = torch.empty(batch_nb_mlps, 2, hidden_dim, device=device)
        b2 = torch.empty(batch_nb_mlps, 2, device=device)

        # Unpack the quantized parameter vector into the weight and bias tensors
        with torch.no_grad():
            k = 0
            for p in [w1, b1, w2, b2]:
                print(f"{p.size()=}")
                x = dequantize(
                    batch_q_params[:, k : k + p.numel() // batch_nb_mlps], -2, 2
                ).view(p.size())
                p.copy_(x)
                k += p.numel() // batch_nb_mlps

        batch_q_set = batch_q_set.view(batch_nb_mlps, -1, 3)
        data_input = dequantize(batch_q_set[:, :, :2], -1, 1).to(device)
        data_targets = batch_q_set[:, :, 2].to(device)

        print(f"{data_input.size()=} {data_targets.size()=}")
        criterion = nn.CrossEntropyLoss()

        acc_loss = 0.0
        nb_errors = 0

        for input, targets in zip(
            data_input.split(batch_size, dim=1), data_targets.split(batch_size, dim=1)
        ):
            h = torch.einsum("mij,mnj->mni", w1, input) + b1[:, None, :]
            h = F.relu(h)
            output = torch.einsum("mij,mnj->mni", w2, h) + b2[:, None, :]
            loss = F.cross_entropy(
                output.reshape(-1, output.size(-1)), targets.reshape(-1)
            )
            acc_loss += loss.item() * input.size(0)
            wta = output.argmax(-1)
            nb_errors += (wta != targets).long().sum(-1)

        errors.append(nb_errors / data_input.size(1))
        acc_loss = acc_loss / data_input.size(1)

    return torch.cat(errors)

######################################################################

def generate_sequence_and_test_set(
    nb_mlps, nb_samples, batch_size, nb_epochs, device, nb_mlps_per_batch=1024
):
    seqs, q_test_sets, test_errors = [], [], []

    for n in range(0, nb_mlps, nb_mlps_per_batch):
        q_train_set, q_test_set, q_params, test_error = generate_sets_and_params(
            batch_nb_mlps=min(nb_mlps_per_batch, nb_mlps - n),
            nb_samples=nb_samples,
            batch_size=batch_size,
            nb_epochs=nb_epochs,
            device=device,
        )

        # One row per MLP: quantized train set, one separator token, quantized params
        sep = q_train_set.new_full((q_train_set.size(0), 1), nb_quantization_levels)
        seqs.append(torch.cat([q_train_set, sep, q_params], dim=-1))

        q_test_sets.append(q_test_set)
        test_errors.append(test_error)

    seq = torch.cat(seqs)
    q_test_set = torch.cat(q_test_sets)
    test_error = torch.cat(test_errors)

    return seq, q_test_set, test_error

######################################################################
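
# Each row of the emitted sequence is laid out as: nb_samples * 3 integers for
# the quantized training set (x0, x1, label for each sample), one separator
# token whose value is nb_quantization_levels, then the quantized MLP
# parameters. The sketch below (split_sequence is not a name from the original
# code) shows how such a row can be split back into its two parts, mirroring
# the slicing done in the __main__ block.


def split_sequence(seq, nb_samples):
    q_train_set = seq[:, : nb_samples * 3]
    q_params = seq[:, nb_samples * 3 + 1 :]  # skip the separator token
    return q_train_set, q_params


######################################################################
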
if __name__ == "__main__":
    batch_nb_mlps, nb_samples = 128, 250

    # Dump a few tasks to .dat files for inspection (batch_size and nb_epochs
    # below are assumed values, they are not in the excerpt)
    generate_sets_and_params(
        batch_nb_mlps=batch_nb_mlps,
        nb_samples=nb_samples,
        batch_size=25,
        nb_epochs=100,
        device=torch.device("cpu"),
        save_as_examples=True,
    )

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    start_time = time.perf_counter()

    seq, q_test_set, test_error = generate_sequence_and_test_set(
        nb_mlps=batch_nb_mlps,
        nb_samples=nb_samples,
        batch_size=25,
        nb_epochs=100,
        device=device,
        nb_mlps_per_batch=17,
    )

    end_time = time.perf_counter()
    print(f"{seq.size(0) / (end_time - start_time):.02f} samples per second")

    q_train_set = seq[:, : nb_samples * 3]
    q_params = seq[:, nb_samples * 3 + 1 :]
    print(f"SANITY #2 {q_train_set.size()=} {q_params.size()=} {seq.size()=}")
    error_train = evaluate_q_params(q_params, q_train_set, nb_mlps_per_batch=17)
    print(f"train {error_train*100}%")
    error_test = evaluate_q_params(q_params, q_test_set, nb_mlps_per_batch=17)
    print(f"test {error_test*100}%")