#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision
from torch import nn
from torch.nn import functional as F

import ffutils

import mygpt
import sky, grids, quiz_machine

import threading

# world quizzes vs. culture quizzes

######################################################################
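
# Use TF32 matmuls when a GPU is available: on recent NVIDIA hardware
# this gives a sizeable speed-up at a negligible cost in matmul
# precision, which is unproblematic for this kind of training.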

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument("--log_filename", type=str, default="train.log")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=-1)

########################################

parser.add_argument("--nb_epochs", type=int, default=10000)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--learning_rate", type=float, default=5e-4)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--problem", type=str, default="grids")

parser.add_argument("--nb_threads", type=int, default=1)

parser.add_argument("--nb_gpus", type=int, default=1)

parser.add_argument("--nb_gpts", type=int, default=5)

parser.add_argument("--min_to_validate", type=int, default=None)

parser.add_argument("--max_to_validate", type=int, default=None)

parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.9)

parser.add_argument("--generation_temperature", type=float, default=2.0)

parser.add_argument("--dirty_debug", action="store_true", default=False)

######################################################################

parser.add_argument("--sky_height", type=int, default=6)

parser.add_argument("--sky_width", type=int, default=8)

parser.add_argument("--sky_nb_birds", type=int, default=3)

parser.add_argument("--sky_nb_iterations", type=int, default=2)

parser.add_argument("--sky_speed", type=int, default=3)

######################################################################
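
# Example invocation (hypothetical parameter values):
#
#   python main.py --problem grids --model 37M --nb_gpus 2 --nb_gpts 5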

args = parser.parse_args()

if args.min_to_validate is None:
    args.min_to_validate = args.nb_gpts - 1

if args.max_to_validate is None:
    args.max_to_validate = args.nb_gpts - 1

if args.result_dir is None:
    args.result_dir = "results_culture"

######################################################################

default_args = {
    "model": "37M",
    "batch_size": 25,
    "nb_train_samples": 100000,
    "nb_test_samples": 10000,
}

for k, v in default_args.items():
    if getattr(args, k) is None:
        setattr(args, k, v)

######################################################################

default_model_args = {
    "17K": {
        "dim_model": 32,
        "dim_keys": 32,
        "dim_hidden": 32,
        "nb_heads": 2,
        "nb_blocks": 2,
    },
    "4M": {
        "dim_model": 256,
        "dim_keys": 32,
        "dim_hidden": 1024,
        "nb_heads": 4,
        "nb_blocks": 6,
    },
    "37M": {
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
    "122M": {
        "dim_model": 768,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 24,
    },
    "352M": {
        "dim_model": 1024,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 48,
    },
}
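
# The keys of this table are rough parameter counts: e.g. "37M" is a
# configuration with approximately 37 million parameters given the
# dimensions above (the exact count is logged at startup).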

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    print(f"result directory {args.result_dir} already exists")
    sys.exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")


######################################################################

if args.dirty_debug:
    args.nb_train_samples = 2500
    args.nb_test_samples = 100

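# The physical batch size is what goes through the model in a single
# forward/backward pass; gradients are accumulated over
# batch_size // physical_batch_size such passes before each optimizer
# step (see one_epoch below).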
if args.physical_batch_size is None:
    args.physical_batch_size = args.batch_size
else:
    assert args.batch_size % args.physical_batch_size == 0

assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0

if args.problem == "sky":
    problem = sky.Sky(
        height=args.sky_height,
        width=args.sky_width,
        nb_birds=args.sky_nb_birds,
        nb_iterations=args.sky_nb_iterations,
        speed=args.sky_speed,
        max_nb_cached_chunks=args.nb_gpus * args.nb_train_samples // 100,
        chunk_size=100,
        nb_threads=args.nb_threads,
    )
    back_accuracy = False
elif args.problem == "grids":
    problem = grids.Grids(
        max_nb_cached_chunks=args.nb_gpus * args.nb_train_samples // 100,
        chunk_size=100,
        nb_threads=args.nb_threads,
    )
    back_accuracy = True
else:
    raise ValueError(f"Unknown problem {args.problem}")

quiz_machine = quiz_machine.QuizMachine(
    problem=problem,
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    back_accuracy=back_accuracy,
    batch_size=args.physical_batch_size,
    result_dir=args.result_dir,
    logger=log_string,
    device=device,
)

######################################################################

log_string(f"device {device}")

vocabulary_size = quiz_machine.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

######################################################################


def run_tests(model, quiz_machine, deterministic_synthesis, local_device=None):
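    """Evaluate the model on the test quizzes: compute the test
    perplexity, and let the quiz machine measure and record the test
    accuracy in model.main_test_accuracy."""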
    if local_device is None:
        local_device = device

    with torch.no_grad():
        model.eval().to(local_device)

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in quiz_machine.batches(model, split="test"):
            input = input.to(local_device)

            bs = model(mygpt.BracketedSequence(input))
            output = bs.x

            loss = F.cross_entropy(output.transpose(1, 2), input)

            acc_test_loss += loss.item() * input.size(0)

            nb_test_samples += input.size(0)

        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(f"test_perplexity {n_epoch} {test_perplexity}")

        model.main_test_accuracy = quiz_machine.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            deterministic_synthesis=deterministic_synthesis,
        )


def one_epoch(model, quiz_machine, local_device=None):
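    """One training epoch: a single pass over the training quizzes,
    accumulating gradients over physical batches and stepping the
    optimizer once every batch_size samples."""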
    if local_device is None:
        local_device = device

    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    model.to(local_device).train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in quiz_machine.batches(model, split="train"):
        input = input.to(local_device)

        if nb_train_samples % args.batch_size == 0:
            optimizer.zero_grad()

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)

        nb_train_samples += input.size(0)

        loss.backward()

        if nb_train_samples % args.batch_size == 0:
            optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))

    log_string(f"train_perplexity {n_epoch} {train_perplexity}")

    run_tests(
        model, quiz_machine, deterministic_synthesis=False, local_device=local_device
    )

    model.TRAINING_LOCK.release()


######################################################################


def standard_validity(logproba):
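    # Sort each row of log-probabilities of the solutions in increasing
    # order. A c_quiz is deemed valid when the weakest model most likely
    # fails it (smallest logproba below log 0.5) while all the other
    # models solve it with near certainty (second smallest above log 0.99).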
    l = logproba.sort(dim=-1).values
    return (l[:, 0] < math.log(0.5)) & (l[:, 1] > math.log(0.99))
    # warnings.warn("TEST!!!", RuntimeWarning)
    # print(l.exp())
    # return (l[:, 0] < math.log(0.99))


def valid_c_quizzes(recorded, criteria):
    result = [q[criteria(lp)] for q, lp in recorded]
    return torch.cat(result, dim=0) if len(result) > 0 else torch.tensor([])


######################################################################


def create_c_quizzes(
    models,
    quiz_machine,
    nb_for_train=1000,
    nb_for_test=100,
):
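    """Generate c_quizzes with models picked at random, score every
    model's solutions, and keep the quizzes that satisfy
    standard_validity, until nb_for_train + nb_for_test of them have
    been accumulated and stored."""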
    quizzes_and_logproba_records = []

    nb_to_create = nb_for_train + nb_for_test

    # ------------------------------------------------------------

    file_name = os.path.join(args.result_dir, f"culture_c_quiz_{n_epoch:04d}_logp.dat")

    with open(file_name, "w") as logp_file:
        while (
            valid_c_quizzes(quizzes_and_logproba_records, standard_validity).size(0)
            < nb_to_create
        ):
            # Select a model at random to generate the new quizzes

            model_for_generation = models[torch.randint(len(models), (1,)).item()]

            c_quizzes = quiz_machine.generate_quizzes(
                nb_to_create,
                model_for_generation=model_for_generation,
                temperature=args.generation_temperature,
            )

            c_quizzes = c_quizzes[quiz_machine.non_trivial(c_quizzes)]

            if c_quizzes.size(0) > 0:
                logproba = quiz_machine.logproba_of_solutions(models, c_quizzes)
                for l in logproba:
                    s = " ".join([str(x.item()) for x in l])
                    logp_file.write(s + "\n")
                quizzes_and_logproba_records.append((c_quizzes, logproba))

            nb_validated = valid_c_quizzes(
                quizzes_and_logproba_records, standard_validity
            ).size(0)

            log_string(
                f"keep c_quizzes model {model_for_generation.id} nb_accumulated {nb_validated} / {nb_to_create}"
            )

    # Store the new c_quizzes that have been validated

    new_c_quizzes = valid_c_quizzes(quizzes_and_logproba_records, standard_validity)

    quiz_machine.reverse_random_half_in_place(new_c_quizzes)

    quiz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
    quiz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)

    # Save a bunch of images to investigate what the validated
    # c_quizzes look like

    q = new_c_quizzes[:72]

    if q.size(0) > 0:
        quiz_machine.save_quizzes(args.result_dir, f"culture_c_quiz_{n_epoch:04d}", q)


######################################################################

models = []

for k in range(args.nb_gpts):
    log_string(f"creating model {k} and its w_quizzes")
    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=args.dim_model,
        dim_keys=args.dim_keys,
        dim_hidden=args.dim_hidden,
        nb_heads=args.nb_heads,
        nb_blocks=args.nb_blocks,
        causal=True,
        dropout=args.dropout,
    ).to(device)

    model.main_test_accuracy = 0.0
    model.id = k
    model.TRAINING_LOCK = threading.Lock()

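    # Each model gets its own stream of world quizzes, half of which
    # are reversed in place so that the model learns to complete
    # quizzes in both directions.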
    model.train_w_quizzes = quiz_machine.generate_token_sequences(
        args.nb_train_samples
    ).to(device)
    quiz_machine.reverse_random_half_in_place(model.train_w_quizzes)
    model.test_w_quizzes = quiz_machine.generate_token_sequences(
        args.nb_test_samples
    ).to(device)
    quiz_machine.reverse_random_half_in_place(model.test_w_quizzes)

    models.append(model)


nb_parameters = sum(p.numel() for p in models[0].parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

# Compute the entropy of the training tokens
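#
# The exponential of the per-token entropy is the perplexity a model
# would reach by predicting every token from the marginal token
# distribution alone, ignoring context; it gives a reference point for
# train_perplexity.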

token_count = 0
for input in quiz_machine.batches(models[0], split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=quiz_machine.vocabulary_size()).sum(
        (0, 1)
    )
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)

######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

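    # Compare train and test quizzes chunk by chunk: each tokenized
    # quiz is hashed as a tuple, and intersections are computed between
    # 25000-element subsets so that memory usage stays bounded.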
    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        quiz_machine.batches(models[0], split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            quiz_machine.batches(models[0], split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

######################################################################

nb_new_c_quizzes_for_train = args.nb_train_samples // 50
nb_new_c_quizzes_for_test = args.nb_test_samples // 50

log_string(
    f"nb_new_c_quizzes_for_train {nb_new_c_quizzes_for_train} nb_new_c_quizzes_for_test {nb_new_c_quizzes_for_test}"
)

######################################################################

if args.dirty_debug:
    args.accuracy_to_make_c_quizzes = 0.0
    # Note: changing args.nb_gpts at this point would have no effect,
    # since the models have already been created above
    nb_new_c_quizzes_for_train = 100
    nb_new_c_quizzes_for_test = 10

    # Relax the validity criterion so that c_quizzes accumulate quickly
    def standard_validity(logproba):
        l = logproba.sort(dim=-1).values
        return l[:, 0] < math.log(0.99)


######################################################################

for n_epoch in range(args.nb_epochs):
    log_string(f"--- epoch {n_epoch} ----------------------------------------")

    cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models])
    log_string(f"current_test_accuracies {cta}")

    ##################################################
    # Select, improve, and eval the worst models

    ranked_models = sorted(models, key=lambda m: float(m.main_test_accuracy))

    weakest_models = ranked_models[: args.nb_gpus]

    for gpu_id, model in enumerate(weakest_models):
        model.TRAINING_LOCK.acquire()

        log_string(
            f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
        )

        threading.Thread(
            target=one_epoch, daemon=True, args=(model, quiz_machine, f"cuda:{gpu_id}")
        ).start()

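    # Wait for all the training threads to finish: acquiring a lock
    # blocks until the thread running one_epoch has released it.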
    for model in weakest_models:
        model.TRAINING_LOCK.acquire()
        model.TRAINING_LOCK.release()

    ##################################################
    # Renew the train sets

    log_string(
        f"cache_w_quizzes contains {quiz_machine.problem.nb_cached_quizzes()} quizzes"
    )

    for model in weakest_models:
        quiz_machine.renew_w_quizzes(model, args.nb_train_samples)

    ##################################################
    # If all the models are good enough, generate new quizzes and
    # re-compute the test errors

    if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
        create_c_quizzes(
            models,
            quiz_machine,
            nb_for_train=nb_new_c_quizzes_for_train,
            nb_for_test=nb_new_c_quizzes_for_test,
        )

######################################################################