#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision
from torch import nn
from torch.nn import functional as F

import ffutils
import mygpt, quizz_machine

# The machine maintains two kinds of quizzes: "world quizzes" (w_quizzes),
# generated by the quizz machine itself, and "culture quizzes" (c_quizzes),
# generated by the models once they are all accurate enough.

######################################################################

accuracy_to_make_c_quizzes = 0.975
nb_new_c_quizzes_for_train = 1000
nb_new_c_quizzes_for_test = 100

######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")
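
# allow_tf32 lets matmuls run in TF32 on Ampere-class and newer GPUs,
# trading a little precision for a substantial speed-up.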

######################################################################

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

# the single-space help string is there so that
# ArgumentDefaultsHelpFormatter displays the default value
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

########################################

parser.add_argument("--nb_epochs", type=int, default=10000)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--learning_rate", type=float, default=1e-4)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--nb_gpts", type=int, default=5)

parser.add_argument("--dirty_debug", action="store_true", default=False)
######################################################################

args = parser.parse_args()

if args.result_dir is None:
    args.result_dir = "results_culture"

######################################################################

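# --dirty_debug makes everything small: the accuracy bar drops to zero and
# only a handful of c_quizzes are produced (the train/test sets are also
# shrunk further below), so a full loop can be exercised quickly.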
if args.dirty_debug:
    accuracy_to_make_c_quizzes = 0.0
    nb_new_c_quizzes_for_train = 100
    nb_new_c_quizzes_for_test = 10

######################################################################

default_args = {
    "model": "37M",
    "batch_size": 100,
    "nb_train_samples": 250000,
    "nb_test_samples": 10000,
}

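# Arguments still at their None default take these values, so anything
# given explicitly on the command line wins.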
for k, v in default_args.items():
    if getattr(args, k) is None:
        setattr(args, k, v)

######################################################################

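# Each preset is named after the approximate parameter count of the model
# it defines (cf. the nb_parameters log further down); dimensions given
# explicitly on the command line override the preset values.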
default_model_args = {
    "17K": {
        "dim_model": 32,
        "dim_keys": 32,
        "dim_hidden": 32,
        "nb_heads": 2,
        "nb_blocks": 2,
    },
    "4M": {
        "dim_model": 256,
        "dim_keys": 32,
        "dim_hidden": 1024,
        "nb_heads": 4,
        "nb_blocks": 6,
    },
    "37M": {
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
    "122M": {
        "dim_model": 768,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 24,
    },
    "352M": {
        "dim_model": 1024,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 48,
    },
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

# refuse to overwrite the results of a previous run
try:
    os.mkdir(args.result_dir)
except FileExistsError:
    print(f"result directory {args.result_dir} already exists")
    sys.exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")


######################################################################

if args.dirty_debug:
    args.nb_train_samples = 2500
    args.nb_test_samples = 100

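# The physical batch size is what actually goes through the model. It must
# divide the logical batch size; e.g. with --batch_size 100 and
# --physical_batch_size 25, one_epoch accumulates gradients over 4
# forward/backward passes per optimizer step.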
if args.physical_batch_size is None:
    args.physical_batch_size = args.batch_size
else:
    assert args.batch_size % args.physical_batch_size == 0

assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0

# the module name is reused for the instance; the module itself is not
# needed afterward
quizz_machine = quizz_machine.QuizzMachine(
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    batch_size=args.physical_batch_size,
    result_dir=args.result_dir,
    logger=log_string,
    device=device,
)

######################################################################

log_string(f"device {device}")

vocabulary_size = quizz_machine.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

######################################################################

# Compute the entropy of the training tokens

token_count = 0
for input in quizz_machine.batches(split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
        (0, 1)
    )
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
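
# The entropy is in nats, so exp(entropy) is the perplexity of the
# empirical token distribution: a uniform distribution over V tokens would
# give exp(log V) = V. It serves as a baseline for the per-model train and
# test perplexities logged below.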

######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s
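
    # Comparing the two splits chunk by chunk (cs sequences at a time)
    # bounds the memory needed to hold the tuples, at the cost of
    # re-reading the train split once per test chunk.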

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        quizz_machine.batches(split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            quizz_machine.batches(split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

######################################################################


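# One pass over the training split. The loop sees physical batches: the
# gradient is zeroed at the start of each logical batch of args.batch_size
# samples and the optimizer steps once at its end, so gradients accumulate
# across the intermediate physical batches.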
def one_epoch(model, quizz_machine):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in quizz_machine.batches(split="train"):
        input = input.to(device)

        if nb_train_samples % args.batch_size == 0:
            optimizer.zero_grad()

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)

        nb_train_samples += input.size(0)

        loss.backward()

        if nb_train_samples % args.batch_size == 0:
            optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))

    log_string(f"train_perplexity {n_epoch} {train_perplexity}")


######################################################################


def run_tests(model, quizz_machine, deterministic_synthesis):
    with torch.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in quizz_machine.batches(split="test"):
            input = input.to(device)

            bs = model(mygpt.BracketedSequence(input))
            output = bs.x

            loss = F.cross_entropy(output.transpose(1, 2), input)

            acc_test_loss += loss.item() * input.size(0)

            nb_test_samples += input.size(0)

        main_test_accuracy = quizz_machine.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            logger=log_string,
            deterministic_synthesis=deterministic_synthesis,
        )

        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(f"test_perplexity {n_epoch} {test_perplexity}")

    model.main_test_accuracy = main_test_accuracy
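
# The accuracy computed by produce_results is stored on the model itself;
# the main loop below uses it to pick which model to train next.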


######################################################################


def create_c_quizzes(
    model,
    other_models,
    quizz_machine,
    nb_for_train=1000,
    nb_for_test=100,
    min_ave_seq_logproba=None,
):
    kept = []

    sum_logits, sum_nb_c_quizzes = 0, 0

    while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
        nb_to_generate = 4 * (nb_for_train + nb_for_test)

        new_c_quizzes, nb_correct, ave_seq_logproba = quizz_machine.create_c_quizzes(
            n_epoch=n_epoch,
            result_dir=args.result_dir,
            logger=log_string,
            nb=nb_to_generate,
            model=model,
            other_models=other_models,
            min_ave_seq_logproba=min_ave_seq_logproba,
        )

        sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
        sum_nb_c_quizzes += new_c_quizzes.size(0)

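        # Keep a quiz only if all the other models but one answered it
        # correctly, i.e. it is solvable but not trivially so.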
        to_keep = new_c_quizzes[nb_correct == len(other_models) - 1]

        if args.dirty_debug:
            to_keep = new_c_quizzes

        log_string(
            f"keep {to_keep.size(0)}/{new_c_quizzes.size(0)} c_quizzes ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%)"
        )

        kept.append(to_keep)

    new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]

    quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
    quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)

    quizz_machine.save_quizzes(
        new_c_quizzes[:72],
        args.result_dir,
        f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}",
        log_string,
    )

    return sum_logits / sum_nb_c_quizzes


######################################################################

models = []

for k in range(args.nb_gpts):
    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=args.dim_model,
        dim_keys=args.dim_keys,
        dim_hidden=args.dim_hidden,
        nb_heads=args.nb_heads,
        nb_blocks=args.nb_blocks,
        causal=True,
        dropout=args.dropout,
    ).to(device)

    model.main_test_accuracy = 0.0
    model.id = k

    models.append(model)


nb_parameters = sum(p.numel() for p in models[0].parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

min_ave_seq_logproba = None
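
# Recorded after the first round of c_quiz generation, then passed as a
# reference to all subsequent rounds.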

for n_epoch in range(args.nb_epochs):
    log_string(f"--- epoch {n_epoch} ----------------------------------------")

    a = [(model.id, float(model.main_test_accuracy)) for model in models]
    a.sort(key=lambda p: p[0])
    log_string(f"current accuracies {a}")

    # select the model with the lowest accuracy
    models.sort(key=lambda model: model.main_test_accuracy)
    model = models[0]

    log_string(
        f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
    )

    # improve it
    one_epoch(model, quizz_machine)

    quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)

    log_string(
        f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    # test it
    run_tests(model, quizz_machine, deterministic_synthesis=False)

    log_string(
        f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
        other_models = models.copy()
        other_models.remove(model)

        ave_seq_logproba = create_c_quizzes(
            model,
            other_models,
            quizz_machine,
            nb_for_train=nb_new_c_quizzes_for_train,
            nb_for_test=nb_new_c_quizzes_for_test,
            min_ave_seq_logproba=min_ave_seq_logproba,
        )

        # We keep the first average logproba as a reference
        if min_ave_seq_logproba is None:
            min_ave_seq_logproba = ave_seq_logproba
        else:
            log_string(
                f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
            )

        # We update everyone
        for model in models:
            run_tests(model, quizz_machine, deterministic_synthesis=False)


######################################################################