6b46fa0f23b68ca2c88466be37b60c069758bce6
[culture.git] / main.py
#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision
from torch import nn
from torch.nn import functional as F

import ffutils
import mygpt
import sky, grids, quiz_machine

# world quizzes vs. culture quizzes
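#
# The w_quizzes ("world quizzes") are produced by the problem generator
# itself (sky or grids), while the c_quizzes ("culture quizzes") are
# generated by the models once they are accurate enough (see
# create_c_quizzes below).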

######################################################################

nb_new_c_quizzes_for_train = 1000
nb_new_c_quizzes_for_test = 100

######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument("--log_filename", type=str, default="train.log")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

########################################

parser.add_argument("--nb_epochs", type=int, default=10000)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--learning_rate", type=float, default=5e-4)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--problem", type=str, default="grids")

parser.add_argument("--nb_gpts", type=int, default=5)

parser.add_argument("--min_to_validate", type=int, default=None)

parser.add_argument("--max_to_validate", type=int, default=None)

parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)

parser.add_argument("--generation_temperature", type=float, default=2.0)

parser.add_argument("--deterministic_validation", action="store_true", default=False)

parser.add_argument("--bidirectional_validation", action="store_true", default=False)

parser.add_argument("--dirty_debug", action="store_true", default=False)

######################################################################

parser.add_argument("--sky_height", type=int, default=6)

parser.add_argument("--sky_width", type=int, default=8)

parser.add_argument("--sky_nb_birds", type=int, default=3)

parser.add_argument("--sky_nb_iterations", type=int, default=2)

parser.add_argument("--sky_speed", type=int, default=3)

######################################################################

args = parser.parse_args()

if args.min_to_validate is None:
    args.min_to_validate = args.nb_gpts - 1

if args.max_to_validate is None:
    args.max_to_validate = args.nb_gpts - 1

if args.result_dir is None:
    args.result_dir = "results_culture"

######################################################################

if args.dirty_debug:
    args.accuracy_to_make_c_quizzes = 0.0
    args.nb_gpts = 2
    nb_new_c_quizzes_for_train = 100
    nb_new_c_quizzes_for_test = 10

######################################################################

default_args = {
    "model": "37M",
    "batch_size": 100,
    "nb_train_samples": 100000,
    "nb_test_samples": 10000,
}

for k, v in default_args.items():
    if getattr(args, k) is None:
        setattr(args, k, v)

######################################################################

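# Transformer dimensions for each model preset; the preset name presumably
# reflects the approximate parameter count (the exact count is logged once
# the models are instantiated).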
default_model_args = {
    "17K": {
        "dim_model": 32,
        "dim_keys": 32,
        "dim_hidden": 32,
        "nb_heads": 2,
        "nb_blocks": 2,
    },
    "4M": {
        "dim_model": 256,
        "dim_keys": 32,
        "dim_hidden": 1024,
        "nb_heads": 4,
        "nb_blocks": 6,
    },
    "37M": {
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
    "122M": {
        "dim_model": 768,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 24,
    },
    "352M": {
        "dim_model": 1024,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 48,
    },
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    print(f"result directory {args.result_dir} already exists")
    exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")


######################################################################

if args.dirty_debug:
    args.nb_train_samples = 2500
    args.nb_test_samples = 100

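# physical_batch_size is the number of samples per forward/backward pass;
# gradients are accumulated until batch_size samples have been processed
# (see one_epoch below), so it must divide batch_size evenly.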
if args.physical_batch_size is None:
    args.physical_batch_size = args.batch_size
else:
    assert args.batch_size % args.physical_batch_size == 0

assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0

if args.problem == "sky":
    problem = sky.Sky(
        height=args.sky_height,
        width=args.sky_width,
        nb_birds=args.sky_nb_birds,
        nb_iterations=args.sky_nb_iterations,
        speed=args.sky_speed,
    )
    back_accuracy = False
elif args.problem == "grids":
    problem = grids.Grids(device=device)
    back_accuracy = True
else:
    raise ValueError(f"Unknown problem {args.problem}")

quiz_machine = quiz_machine.QuizMachine(
    problem=problem,
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    back_accuracy=back_accuracy,
    batch_size=args.physical_batch_size,
    result_dir=args.result_dir,
    logger=log_string,
    device=device,
)

######################################################################

log_string(f"device {device}")

vocabulary_size = quiz_machine.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

######################################################################

# Compute the entropy of the training tokens

token_count = 0
for input in quiz_machine.batches(split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=quiz_machine.vocabulary_size()).sum(
        (0, 1)
    )
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
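# exp of the per-token entropy is the perplexity of a unigram model that
# predicts each token from its marginal frequency in the training set.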

######################################################################
# A bit of paranoia never hurts
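#
# Check, chunk by chunk, how many test quizzes also appear verbatim in the
# train set, and abort if the overlap exceeds max_percents_of_test_in_train
# percent of the test samples.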

if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        quiz_machine.batches(split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            quiz_machine.batches(split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

##############################


def one_epoch(model, quiz_machine):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

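    # Batches have physical_batch_size samples; gradients are accumulated
    # and the optimizer only steps once args.batch_size samples have been
    # processed.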
    for input in quiz_machine.batches(split="train"):
        input = input.to(device)

        if nb_train_samples % args.batch_size == 0:
            optimizer.zero_grad()

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)

        nb_train_samples += input.size(0)

        loss.backward()

        if nb_train_samples % args.batch_size == 0:
            optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))

    log_string(f"train_perplexity {n_epoch} {train_perplexity}")


######################################################################


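# Evaluate a model on the test quizzes: compute the test perplexity and set
# model.main_test_accuracy from quiz_machine.produce_results.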
def run_tests(model, quiz_machine, deterministic_synthesis):
    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0
        nb_samples_accumulated = 0

        for input in quiz_machine.batches(split="test"):
            input = input.to(device)

            bs = model(mygpt.BracketedSequence(input))
            output = bs.x

            loss = F.cross_entropy(output.transpose(1, 2), input)

            acc_test_loss += loss.item() * input.size(0)

            nb_test_samples += input.size(0)

        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(f"test_perplexity {n_epoch} {test_perplexity}")

        model.main_test_accuracy = quiz_machine.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            deterministic_synthesis=deterministic_synthesis,
        )


######################################################################


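# Concatenate all the recorded quizzes whose number of correct predictions
# satisfies the given criteria (a boolean mask computed from nb_correct).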
def valid_c_quizzes(recorded, criteria):
    result = [q[criteria(c)] for q, c in recorded]
    return torch.cat(result, dim=0) if len(result) > 0 else torch.tensor([])


######################################################################


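# Generate new culture quizzes: models picked at random synthesize candidate
# quizzes at the configured generation temperature, every model then tries
# to solve each candidate, and the candidates answered correctly by the
# required number of models are kept until nb_for_train + nb_for_test have
# been collected.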
def create_c_quizzes(
    models,
    quiz_machine,
    nb_for_train=1000,
    nb_for_test=100,
):
    quizzes_and_nb_correct_records = []

    nb_to_create = nb_for_train + nb_for_test

    # ------------------------------------------------------------

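    # A c_quiz is considered valid when the number of models that answer it
    # correctly falls in [min_to_validate, max_to_validate]; with the default
    # settings this means all models but one.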
    standard_validity = lambda nb_correct: torch.logical_and(
        nb_correct >= args.min_to_validate, nb_correct <= args.max_to_validate
    )

    file_name = os.path.join(args.result_dir, f"culture_c_quiz_{n_epoch:04d}_logp.dat")

    with open(file_name, "w") as logp_file:
        while (
            valid_c_quizzes(quizzes_and_nb_correct_records, standard_validity).size(0)
            < nb_to_create
        ):
            # Select a model at random to generate the new quizzes

            model_for_generation = models[torch.randint(len(models), (1,))]

            c_quizzes = quiz_machine.generate_quizzes(
                nb_to_create,
                model_for_generation=model_for_generation,
                temperature=args.generation_temperature,
            )

            nb_correct, seq_logproba = quiz_machine.compute_correctness(
                c_quizzes,
                models,
                bidirectional_validation=args.bidirectional_validation,
                deterministic_validation=args.deterministic_validation,
            )

            for n, l in zip(nb_correct, seq_logproba):
                s = " ".join([str(x.item()) for x in l])
                logp_file.write(f"{n} {s}\n")

            if args.dirty_debug:
                nb_correct = torch.randint(
                    len(models) + 1, nb_correct.size(), device=c_quizzes.device
                )

            quizzes_and_nb_correct_records.append((c_quizzes, nb_correct))

            nv = F.one_hot(nb_correct, num_classes=len(models) + 1).sum(0)
            nv = " ".join([str(x.item()) for x in nv])

            nb_validated = valid_c_quizzes(
                quizzes_and_nb_correct_records, standard_validity
            ).size(0)

            log_string(
                f"keep c_quizzes model {model_for_generation.id} kept {nv} nb_accumulated {nb_validated} / {nb_to_create}"
            )

    # store the new c_quizzes which have been validated

    new_c_quizzes = valid_c_quizzes(quizzes_and_nb_correct_records, standard_validity)

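    # Presumably flips the question/answer order of a random half of the
    # quizzes so that both orderings appear in the stored data.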
    quiz_machine.reverse_random_half_in_place(new_c_quizzes)

    quiz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
    quiz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)

    # save a bunch of images to investigate what quizzes with a
    # certain nb of correct predictions look like

    for n in range(len(models) + 1):
        s = (
            "_validated"
            if n >= args.min_to_validate and n <= args.max_to_validate
            else ""
        )

        q = valid_c_quizzes(
            quizzes_and_nb_correct_records, criteria=lambda nb_correct: nb_correct == n
        )[:72]

        quiz_machine.reverse_random_half_in_place(q)

        if q.size(0) > 0:
            quiz_machine.save_quizzes(
                args.result_dir, f"culture_c_quiz_{n_epoch:04d}_N{n}{s}", q
            )


######################################################################

models = []

for k in range(args.nb_gpts):
    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=args.dim_model,
        dim_keys=args.dim_keys,
        dim_hidden=args.dim_hidden,
        nb_heads=args.nb_heads,
        nb_blocks=args.nb_blocks,
        causal=True,
        dropout=args.dropout,
    ).to(device)

    model.main_test_accuracy = 0.0
    model.id = k

    models.append(model)


nb_parameters = sum(p.numel() for p in models[0].parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

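# Main loop: at each epoch only the model with the lowest test accuracy is
# trained and re-evaluated, a fraction of the w_quizzes is renewed, and once
# every model reaches accuracy_to_make_c_quizzes a new batch of c_quizzes is
# generated and all the models are tested again.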
for n_epoch in range(args.nb_epochs):
    log_string(f"--- epoch {n_epoch} ----------------------------------------")

    cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models])
    log_string(f"current_test_accuracies {cta}")

    # Select, improve, and eval the worst model

    weakest_model = min(models, key=lambda m: float(m.main_test_accuracy))

    log_string(
        f"training model {weakest_model.id} main_test_accuracy {weakest_model.main_test_accuracy}"
    )

    one_epoch(weakest_model, quiz_machine)

    log_string(
        f"train_set_composition w_quizzes {quiz_machine.nb_batch_w_quizzes} c_quizzes {quiz_machine.nb_batch_c_quizzes}"
    )

    run_tests(weakest_model, quiz_machine, deterministic_synthesis=False)

    log_string(
        f"test_set_composition w_quizzes {quiz_machine.nb_batch_w_quizzes} c_quizzes {quiz_machine.nb_batch_c_quizzes}"
    )

    # Replace a fraction of the w_quizzes with fresh ones

    quiz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)

    # If all the models are good enough, generate new quizzes and
    # re-compute the test errors

    if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
        create_c_quizzes(
            models,
            quiz_machine,
            nb_for_train=nb_new_c_quizzes_for_train,
            nb_for_test=nb_new_c_quizzes_for_test,
        )

        for model in models:
            run_tests(model, quiz_machine, deterministic_synthesis=False)


######################################################################