#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision
from torch import nn
from torch.nn import functional as F

import ffutils
import mygpt, tasks

######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")
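# Note: TF32 matmuls run markedly faster on Ampere-class GPUs, at a
# small precision cost that is harmless for training.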

######################################################################

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

########################################

parser.add_argument("--nb_epochs", type=int, default=10000)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--learning_rate", type=float, default=1e-4)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--nb_gpts", type=int, default=5)

parser.add_argument("--check", action="store_true", default=False)

######################################################################

args = parser.parse_args()

if args.result_dir is None:
    args.result_dir = "results_culture"

######################################################################

default_args = {
    "model": "37M",
    "batch_size": 100,
    "nb_train_samples": 250000,
    "nb_test_samples": 10000,
}

for k, v in default_args.items():
    if getattr(args, k) is None:
        setattr(args, k, v)

######################################################################

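# Predefined architectures, named after their approximate parameter
# counts.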
default_model_args = {
    "17K": {
        "dim_model": 32,
        "dim_keys": 32,
        "dim_hidden": 32,
        "nb_heads": 2,
        "nb_blocks": 2,
    },
    "4M": {
        "dim_model": 256,
        "dim_keys": 32,
        "dim_hidden": 1024,
        "nb_heads": 4,
        "nb_blocks": 6,
    },
    "37M": {
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
    "122M": {
        "dim_model": 768,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 24,
    },
    "352M": {
        "dim_model": 1024,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 48,
    },
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    print(f"result directory {args.result_dir} already exists")
    sys.exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")


######################################################################

if args.check:
    args.nb_train_samples = 2500
    args.nb_test_samples = 100

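# The physical batch size is what actually fits on the device; the
# optimizer steps once per batch_size samples, accumulating gradients
# over batch_size // physical_batch_size forward/backward passes (see
# one_epoch below).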
if args.physical_batch_size is None:
    args.physical_batch_size = args.batch_size
else:
    assert args.batch_size % args.physical_batch_size == 0

assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0

task = tasks.World(
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    batch_size=args.physical_batch_size,
    result_dir=args.result_dir,
    logger=log_string,
    device=device,
)

######################################################################

log_string(f"device {device}")

vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

######################################################################

# Compute the entropy of the training tokens

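# With empirical token frequencies p(t), the entropy is
# H = -sum_t p(t) log p(t), and exp(H) is the perplexity a memoryless
# model of the tokens would reach, a reference floor for the
# perplexities logged during training.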
token_count = 0
for input in task.batches(split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)

######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

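    # Compare every test sample against every train sample, as tuples
    # of token ids, in chunks of 25000 to bound memory usage.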
    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        task.batches(split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            task.batches(split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

######################################################################


def one_epoch(model, task):
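    # One pass over the training set. n_epoch is the global set by the
    # main loop below. Batches arrive in chunks of physical_batch_size
    # samples; the optimizer steps once every batch_size samples.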
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in task.batches(split="train"):
        input = input.to(device)

        if nb_train_samples % args.batch_size == 0:
            optimizer.zero_grad()

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)

        nb_train_samples += input.size(0)

        loss.backward()

        if nb_train_samples % args.batch_size == 0:
            optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))

    log_string(f"train_perplexity {n_epoch} {train_perplexity}")


######################################################################


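# Evaluate a model: average test cross-entropy (logged as perplexity)
# plus the task-defined accuracy computed by produce_results.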
def run_tests(model, task, deterministic_synthesis):
    with torch.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in task.batches(split="test"):
            input = input.to(device)

            bs = model(mygpt.BracketedSequence(input))
            output = bs.x

            loss = F.cross_entropy(output.transpose(1, 2), input)

            acc_test_loss += loss.item() * input.size(0)

            nb_test_samples += input.size(0)

        main_test_accuracy = task.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            logger=log_string,
            deterministic_synthesis=deterministic_synthesis,
        )

        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(f"test_perplexity {n_epoch} {test_perplexity}")

    model.main_test_accuracy = main_test_accuracy


######################################################################


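# Make the model generate new quizzes, keep the ones that exactly
# len(other_models) - 1 of the other models answer correctly, and add
# them to the training and test sets. Returns the average logit of the
# generated tokens, so that later generations can be steered toward
# the difficulty of the first validated batch.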
def create_quizzes(
    model,
    other_models,
    task,
    nb_for_train=1000,
    nb_for_test=100,
    desired_average_logits=None,
):
    kept = []
    nb_generated_tokens, sum_logits = 0, 0

    while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
        nb_to_generate = 4 * (nb_for_train + nb_for_test)
        new_quizzes, nb_correct, average_logits = task.create_new_quizzes(
            n_epoch=n_epoch,
            result_dir=args.result_dir,
            logger=log_string,
            nb=nb_to_generate,
            model=model,
            other_models=other_models,
            desired_average_logits=desired_average_logits,
        )

        nb_generated_tokens += new_quizzes.numel()
        sum_logits += average_logits * new_quizzes.numel()

        to_keep = new_quizzes[nb_correct == len(other_models) - 1]
        log_string(
            f"keep {to_keep.size(0)}/{new_quizzes.size(0)} quizzes ({to_keep.size(0)*100/new_quizzes.size(0):.02f}%)"
        )
        kept.append(to_keep)

    new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]

    task.store_new_quizzes(new_quizzes[:nb_for_train], for_train=True)
    task.store_new_quizzes(new_quizzes[nb_for_train:], for_train=False)

    task.save_image(
        new_quizzes[:72],
        args.result_dir,
        f"world_quiz_{n_epoch:04d}_{model.id:02d}.png",
        log_string,
    )

    return sum_logits / nb_generated_tokens


######################################################################

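# Instantiate the args.nb_gpts models; they share the same architecture
# but are initialized, trained, and evaluated independently.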
models = []

for k in range(args.nb_gpts):
    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=args.dim_model,
        dim_keys=args.dim_keys,
        dim_hidden=args.dim_hidden,
        nb_heads=args.nb_heads,
        nb_blocks=args.nb_blocks,
        causal=True,
        dropout=args.dropout,
    ).to(device)

    model.main_test_accuracy = 0.0
    model.id = k

    models.append(model)


nb_parameters = sum(p.numel() for p in models[0].parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

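# New quizzes are created only once every model reaches this accuracy
# on the current test set.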
accuracy_to_make_quizzes = 0.975
nb_new_quizzes_for_train = 1000
nb_new_quizzes_for_test = 100

if args.check:
    accuracy_to_make_quizzes = 0.0
    nb_new_quizzes_for_train = 10
    nb_new_quizzes_for_test = 10

desired_average_logits = None

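# Main loop: each epoch trains the currently weakest model for one
# pass, refreshes part of the training data, re-tests it, and, once
# all the models are accurate enough, has it create new quizzes.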
for n_epoch in range(args.nb_epochs):
    log_string(f"--- epoch {n_epoch} ----------------------------------------")

    a = [(model.id, float(model.main_test_accuracy)) for model in models]
    a.sort(key=lambda p: p[0])
    log_string(f"current accuracies {a}")

    # select the model with lowest accuracy
    models.sort(key=lambda model: model.main_test_accuracy)
    model = models[0]

    log_string(
        f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
    )

    # improve it
    one_epoch(model, task)

    task.renew_samples(args.nb_train_samples // args.nb_gpts)

    log_string(
        f"train_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}"
    )

    # test it
    run_tests(model, task, deterministic_synthesis=False)

    log_string(
        f"test_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}"
    )

    if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_quizzes:
        other_models = models.copy()
        other_models.remove(model)

        average_logits = create_quizzes(
            model,
            other_models,
            task,
            nb_for_train=nb_new_quizzes_for_train,
            nb_for_test=nb_new_quizzes_for_test,
            desired_average_logits=desired_average_logits,
        )

        # We keep the first average logits as a reference
        if desired_average_logits is None:
            desired_average_logits = average_logits
        else:
            log_string(
                f"desired_average_logits {desired_average_logits} average_logits {average_logits}"
            )

        # We update everyone's test accuracy; a separate name avoids
        # shadowing the model being trained
        for m in models:
            run_tests(m, task, deterministic_synthesis=False)


######################################################################