[culture.git] / main.py
#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision
from torch import nn
from torch.nn import functional as F

import ffutils
import mygpt
import sky, wireworld, quizz_machine

# "world" quizzes (w_quizzes) are generated procedurally by the problem
# itself; "culture" quizzes (c_quizzes) are generated by the models once
# they are accurate enough

######################################################################

nb_new_c_quizzes_for_train = 1000
nb_new_c_quizzes_for_test = 100

if torch.cuda.is_available():
    device = torch.device("cuda")
    # TF32 matmuls trade a little precision for substantial speed on
    # Ampere and newer GPUs
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument("--log_filename", type=str, default="train.log")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

########################################

parser.add_argument("--nb_epochs", type=int, default=10000)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--learning_rate", type=float, default=1e-3)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--reverse_cleanup", action="store_true", default=False)

parser.add_argument("--problem", type=str, default="sky")

parser.add_argument("--nb_gpts", type=int, default=5)

parser.add_argument("--nb_models_for_generation", type=int, default=1)

parser.add_argument("--generation_mode", type=str, default="groupthink")

parser.add_argument("--min_to_validate", type=int, default=4)

parser.add_argument("--max_to_validate", type=int, default=4)

parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)

parser.add_argument("--dirty_debug", action="store_true", default=False)

parser.add_argument("--sky_height", type=int, default=6)

parser.add_argument("--sky_width", type=int, default=8)

parser.add_argument("--sky_nb_birds", type=int, default=3)

parser.add_argument("--sky_nb_iterations", type=int, default=2)

parser.add_argument("--sky_speed", type=int, default=3)

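# Example invocation (hypothetical, for illustration; any of the
# arguments above can be overridden on the command line):
#
#   python main.py --problem=sky --model=37M --nb_gpts=5 --result_dir=results_sky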

######################################################################

args = parser.parse_args()

if args.result_dir is None:
    args.result_dir = "results_culture"

######################################################################

if args.dirty_debug:
    args.accuracy_to_make_c_quizzes = 0.0
    nb_new_c_quizzes_for_train = 100
    nb_new_c_quizzes_for_test = 10

######################################################################

default_args = {
    "model": "37M",
    "batch_size": 100,
    "nb_train_samples": 100000,
    "nb_test_samples": 10000,
}

for k, v in default_args.items():
    if getattr(args, k) is None:
        setattr(args, k, v)

######################################################################

default_model_args = {
    "17K": {
        "dim_model": 32,
        "dim_keys": 32,
        "dim_hidden": 32,
        "nb_heads": 2,
        "nb_blocks": 2,
    },
    "4M": {
        "dim_model": 256,
        "dim_keys": 32,
        "dim_hidden": 1024,
        "nb_heads": 4,
        "nb_blocks": 6,
    },
    "37M": {
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
    "122M": {
        "dim_model": 768,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 24,
    },
    "352M": {
        "dim_model": 1024,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 48,
    },
}
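
# The size labels approximate the parameter count of the transformer
# trunk. A rough check for "37M", assuming the usual Q/K/V/O attention
# projections of width nb_heads * dim_keys and a two-layer MLP, and
# ignoring embeddings and layer norms:
#
#   12 blocks * (4 * 512 * 512 + 2 * 512 * 2048) = 37,748,736 ~ 37M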

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

# refuse to overwrite an existing result directory
try:
    os.mkdir(args.result_dir)
except FileExistsError:
    print(f"result directory {args.result_dir} already exists")
    exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")


######################################################################

if args.dirty_debug:
    args.nb_train_samples = 2500
    args.nb_test_samples = 100

# physical_batch_size is the micro-batch actually sent to the device;
# gradients are accumulated until batch_size samples have been processed
if args.physical_batch_size is None:
    args.physical_batch_size = args.batch_size
else:
    assert args.batch_size % args.physical_batch_size == 0

assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0

if args.problem == "sky":
    problem = sky.Sky(
        height=args.sky_height,
        width=args.sky_width,
        nb_birds=args.sky_nb_birds,
        nb_iterations=args.sky_nb_iterations,
        speed=args.sky_speed,
    )
elif args.problem == "wireworld":
    problem = wireworld.Wireworld(height=8, width=10, nb_iterations=2, speed=5)
else:
    raise ValueError(f"Unknown problem {args.problem}")

# note: this rebinds the name of the quizz_machine module to the
# instance, which is fine since the module is not needed afterwards
quizz_machine = quizz_machine.QuizzMachine(
    problem=problem,
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    batch_size=args.physical_batch_size,
    result_dir=args.result_dir,
    logger=log_string,
    device=device,
)

######################################################################

log_string(f"device {device}")

vocabulary_size = quizz_machine.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

######################################################################

# Compute the entropy of the training tokens

token_count = 0
for input in quizz_machine.batches(split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
        (0, 1)
    )
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)

######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

    # iterate over the samples as sets of tuples, in chunks of cs, to
    # bound memory usage during the comparison
    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        quizz_machine.batches(split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            quizz_machine.batches(split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

######################################################################


def one_epoch(model, quizz_machine):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in quizz_machine.batches(split="train"):
        input = input.to(device)

        # batches hold physical_batch_size samples each; gradients are
        # accumulated, and the weights updated only once batch_size
        # samples have been processed
        if nb_train_samples % args.batch_size == 0:
            optimizer.zero_grad()

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)

        nb_train_samples += input.size(0)

        loss.backward()

        if nb_train_samples % args.batch_size == 0:
            optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))

    # n_epoch is the global epoch counter from the main loop below
    log_string(f"train_perplexity {n_epoch} {train_perplexity}")


######################################################################


def run_tests(model, quizz_machine, deterministic_synthesis):
    with torch.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in quizz_machine.batches(split="test"):
            input = input.to(device)

            bs = model(mygpt.BracketedSequence(input))
            output = bs.x

            loss = F.cross_entropy(output.transpose(1, 2), input)

            acc_test_loss += loss.item() * input.size(0)

            nb_test_samples += input.size(0)

        main_test_accuracy = quizz_machine.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            deterministic_synthesis=deterministic_synthesis,
        )

        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(f"test_perplexity {n_epoch} {test_perplexity}")

    model.main_test_accuracy = main_test_accuracy


######################################################################


def create_c_quizzes(
    models,
    quizz_machine,
    nb_for_train=1000,
    nb_for_test=100,
    min_ave_seq_logproba=None,
):
    # store the generated quizzes, bucketed by the number of models
    # that predicted them correctly
    recorded = dict([(n, []) for n in range(len(models) + 1)])

    sum_logits, sum_nb_c_quizzes = 0, 0

    # a quiz is validated when the number of models that solve it is in
    # [min_to_validate, max_to_validate]
    def nb_validated():
        return sum(
            [
                sum([x.size(0) for x in recorded[n]])
                for n in range(args.min_to_validate, args.max_to_validate + 1)
            ]
        )

    nb_to_create = nb_for_train + nb_for_test

    warnings.warn(
        f"{args.nb_gpts=} {args.nb_models_for_generation=} {args.min_to_validate=} {args.max_to_validate=}"
    )

    while nb_validated() < nb_to_create:
        (
            new_c_quizzes,
            nb_correct,
            ave_seq_logproba,
        ) = quizz_machine.gang_create_c_quizzes(
            nb=nb_to_create,
            nb_models_for_generation=args.nb_models_for_generation,
            models=models,
            mode=args.generation_mode,
            reverse_cleanup=args.reverse_cleanup,
            min_ave_seq_logproba=min_ave_seq_logproba,
            n_epoch=n_epoch,
            result_dir=args.result_dir,
        )

        sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
        sum_nb_c_quizzes += new_c_quizzes.size(0)

        if args.dirty_debug:
            nb_correct = torch.randint(
                len(models) + 1, nb_correct.size(), device=new_c_quizzes.device
            )

        for n in range(nb_correct.max().item() + 1):
            recorded[n].append(new_c_quizzes[nb_correct == n].clone())

        nv = F.one_hot(nb_correct, num_classes=len(models) + 1).sum(0)
        nv = " ".join([str(x.item()) for x in nv])

        log_string(f"kept c_quizzes {nv} total {nb_validated()} / {nb_to_create}")

    # concatenate and shuffle each bucket, dropping the empty ones
    # (iterate over a copy of the keys since we delete while iterating)
    for n in list(recorded.keys()):
        if len(recorded[n]) > 0:
            q = torch.cat(recorded[n], dim=0)
            q = q[torch.randperm(q.size(0), device=q.device)]
            recorded[n] = q
        else:
            del recorded[n]

    new_c_quizzes = torch.cat(
        [
            recorded[n]
            for n in range(args.min_to_validate, args.max_to_validate + 1)
            if n in recorded
        ],
        dim=0,
    )

    new_c_quizzes = new_c_quizzes[
        torch.randperm(new_c_quizzes.size(0), device=new_c_quizzes.device)[
            : nb_for_train + nb_for_test
        ]
    ]

    quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
    quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)

    for n in recorded.keys():
        s = (
            "_validated"
            if n >= args.min_to_validate and n <= args.max_to_validate
            else ""
        )
        quizz_machine.problem.save_quizzes(
            recorded[n][:72],
            args.result_dir,
            f"culture_c_quiz_{n_epoch:04d}_N{n}{s}",
        )

    return sum_logits / sum_nb_c_quizzes


######################################################################

models = []

for k in range(args.nb_gpts):
    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=args.dim_model,
        dim_keys=args.dim_keys,
        dim_hidden=args.dim_hidden,
        nb_heads=args.nb_heads,
        nb_blocks=args.nb_blocks,
        causal=True,
        dropout=args.dropout,
    ).to(device)

    model.main_test_accuracy = 0.0
    model.id = k

    models.append(model)


nb_parameters = sum(p.numel() for p in models[0].parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

min_ave_seq_logproba = None

for n_epoch in range(args.nb_epochs):
    log_string(f"--- epoch {n_epoch} ----------------------------------------")

    # log the accuracies in model-id order
    a = [(model.id, float(model.main_test_accuracy)) for model in models]
    a.sort(key=lambda p: p[0])
    s = " ".join([f"{p[1]*100:.02f}%" for p in a])
    log_string(f"current accuracies {s}")

    # select the model with the lowest accuracy
    models.sort(key=lambda model: model.main_test_accuracy)
    model = models[0]

    log_string(
        f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
    )

    # improve it
    one_epoch(model, quizz_machine)

    quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)

    log_string(
        f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    # test it
    run_tests(model, quizz_machine, deterministic_synthesis=False)

    log_string(
        f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    # once all the models are accurate enough, they generate new culture
    # quizzes, which are added to the training and test sets
    if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
        ave_seq_logproba = create_c_quizzes(
            models,
            quizz_machine,
            nb_for_train=nb_new_c_quizzes_for_train,
            nb_for_test=nb_new_c_quizzes_for_test,
            min_ave_seq_logproba=min_ave_seq_logproba,
        )

        # We keep the first average logits as a reference
        # if min_ave_seq_logproba is None:
        #     min_ave_seq_logproba = ave_seq_logproba
        # else:
        #     log_string(
        #         f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
        #     )

        # We update everyone
        for model in models:
            run_tests(model, quizz_machine, deterministic_synthesis=False)


######################################################################