#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

# torch.backends.cuda.matmul.allow_tf32
# torch.autocast(torch.bfloat16)

import math, sys, argparse, time, tqdm, os

import torch
from torch.nn import functional as F

import mygpt

######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(
    description="Solving the shortest-path problem in mazes with a GPT."
)

parser.add_argument("--log_filename", type=str, default="train.log")

parser.add_argument("--result_dir", type=str, default="results_default")

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--nb_epochs", type=int, default=25)

parser.add_argument("--nb_train_samples", type=int, default=200000)

parser.add_argument("--nb_test_samples", type=int, default=50000)

parser.add_argument("--batch_size", type=int, default=25)

parser.add_argument("--optim", type=str, default="adam")

parser.add_argument("--learning_rate", type=float, default=1e-3)

parser.add_argument(
    "--learning_rate_schedule", type=str, default="10: 2e-4,20: 4e-5,30: 8e-6"
)

parser.add_argument("--dim_model", type=int, default=512)

parser.add_argument("--dim_keys", type=int, default=64)

parser.add_argument("--dim_hidden", type=int, default=2048)

parser.add_argument("--nb_heads", type=int, default=8)

parser.add_argument("--nb_blocks", type=int, default=12)

parser.add_argument("--dropout", type=float, default=0.1)

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--overwrite_results", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")

##############################
# maze options

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)

######################################################################

args = parser.parse_args()

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.overwrite_results:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################


# ar_mask is a Boolean matrix of same shape as input, with 1s on the
# tokens that should be generated
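#
# For example, with
#
#   input   = [[w0, w1, p0, p1]]
#   ar_mask = [[ 0,  0,  1,  1]]
#
# the first two tokens are left untouched and the last two are
# resampled autoregressively, one position at a time.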


def masked_inplace_autoregression(model, batch_size, input, ar_mask):

    for input, ar_mask in zip(input.split(batch_size), ar_mask.split(batch_size)):
        i = (ar_mask.sum(0) > 0).nonzero()
        if i.min() > 0:
            # Needed to initialize the model's cache
            model(mygpt.BracketedSequence(input, 0, i.min()))
        for s in range(i.min(), i.max() + 1):
            output = model(mygpt.BracketedSequence(input, s, 1)).x
            logits = output[:, s]
            if args.deterministic_synthesis:
                t_next = logits.argmax(1)
            else:
                dist = torch.distributions.categorical.Categorical(logits=logits)
                t_next = dist.sample()
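            # Write the sampled token where ar_mask is 1, keep the
            # original token elsewhere.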
            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]


######################################################################


class Task:
    def batches(self, split="train"):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(self, n_epoch, model):
        pass


######################################################################

import maze

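# A sample is the flattened maze followed by its flattened path, i.e.
# 2 * height * width tokens; with the default 13 x 21 mazes, that is
# 546 tokens per sequence.
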
class TaskMaze(Task):
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        height,
        width,
        nb_walls,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device

        mazes_train, paths_train = maze.create_maze_data(
            nb_train_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
        )
        mazes_train, paths_train = mazes_train.to(device), paths_train.to(device)
        self.train_input = self.map2seq(mazes_train, paths_train)

        mazes_test, paths_test = maze.create_maze_data(
            nb_test_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
        )
        mazes_test, paths_test = mazes_test.to(device), paths_test.to(device)
        self.test_input = self.map2seq(mazes_test, paths_test)

        self.nb_codes = int(self.train_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def compute_error(self, model, split="train", nb_to_use=-1):
        nb_total, nb_correct = 0, 0
        for input in self.batches(split, nb_to_use):
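            # Keep the maze (the first height * width tokens), erase
            # the path part, and let the model regenerate it.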
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(model, self.batch_size, result, ar_mask)
            mazes, paths = self.seq2map(result)
            nb_correct += maze.path_correctness(mazes, paths).long().sum()
            nb_total += mazes.size(0)

        return nb_total, nb_correct

    def produce_results(self, n_epoch, model):
        with torch.autograd.no_grad():
            t = model.training
            model.eval()

            train_nb_total, train_nb_correct = self.compute_error(
                model, "train", nb_to_use=1000
            )
            log_string(
                f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
            )

            test_nb_total, test_nb_correct = self.compute_error(
                model, "test", nb_to_use=1000
            )
            log_string(
                f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
            )

            input = self.test_input[:32]
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(model, self.batch_size, result, ar_mask)

            mazes, paths = self.seq2map(input)
            _, predicted_paths = self.seq2map(result)
            maze.save_image(
                os.path.join(args.result_dir, f"result_{n_epoch:04d}.png"),
                mazes,
                paths,
                predicted_paths,
                maze.path_correctness(mazes, predicted_paths),
            )

            model.train(t)


######################################################################

log_string(f"device {device}")


task = TaskMaze(
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    batch_size=args.batch_size,
    height=args.maze_height,
    width=args.maze_width,
    nb_walls=args.maze_nb_walls,
    device=device,
)


vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

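# Resuming: the checkpoint stores the number of finished epochs, the
# model weights, and the RNG states, so an interrupted run restarts on
# the same training trajectory.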
nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except Exception as e:
        log_string(f"error when loading the checkpoint: {e}")
        exit(1)

######################################################################

nb_epochs_default = 25
nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
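# train_set_perplexity is exp(H), with H the entropy of the empirical
# token distribution; a uniform distribution over V tokens would give
# a perplexity of V.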

##############################

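# With the default schedule "10: 2e-4,20: 4e-5,30: 8e-6", epochs 0-9
# use args.learning_rate, epochs 10-19 use 2e-4, epochs 20-29 use
# 4e-5, and 8e-6 from epoch 30 on. Passing "cos" instead builds a
# cosine decay from args.learning_rate down to 0 over args.nb_epochs
# epochs.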
if args.learning_rate_schedule == "cos":
    learning_rate_schedule = {}
    for n_epoch in range(args.nb_epochs):
        u = n_epoch / args.nb_epochs * math.pi
        learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
else:
    u = {
        int(k): float(v)
        for k, v in [
            tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
        ]
    }

    learning_rate_schedule = {}
    learning_rate = args.learning_rate
    for n_epoch in range(args.nb_epochs):
        if n_epoch in u:
            learning_rate = u[n_epoch]
        learning_rate_schedule[n_epoch] = learning_rate

log_string(f"learning_rate_schedule {learning_rate_schedule}")

##############################

nb_samples_seen = 0

if nb_epochs_finished >= nb_epochs:
    task.produce_results(nb_epochs_finished, model)

for n_epoch in range(nb_epochs_finished, nb_epochs):

    learning_rate = learning_rate_schedule[n_epoch]

    log_string(f"learning_rate {learning_rate}")

    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in task.batches(split="train"):
        input = input.to(device)
        output = model(mygpt.BracketedSequence(input)).x
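        # F.cross_entropy expects logits as (N, C, T); the model
        # returns (N, T, C), hence the transpose.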
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)
        nb_train_samples += input.size(0)
        nb_samples_seen += input.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    with torch.autograd.no_grad():

        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in task.batches(split="test"):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)

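        # The prediction perplexity is exp of the average per-token
        # cross-entropy; the argument is clamped at 100 to keep
        # math.exp from overflowing early in training.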
        train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(
            f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
        )

        task.produce_results(n_epoch, model)

    checkpoint = {
        "nb_epochs_finished": n_epoch + 1,
        "model_state": model.state_dict(),
        "rng_state": torch.get_rng_state(),
    }

    if torch.cuda.is_available():
        checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()

    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
    torch.save(checkpoint, checkpoint_name)
    log_string(f"saved checkpoint {checkpoint_name}")

######################################################################