# Written by Francois Fleuret <francois@fleuret.org>
-import math, sys, argparse, time, tqdm, os
+import math, sys, argparse, time, tqdm, os, datetime
import torch, torchvision
from torch import nn
"nb_test_samples": 10000,
},
"memory": {
- "model": "4M",
+ "model": "37M",
"batch_size": 100,
- "nb_train_samples": 5000,
+ "nb_train_samples": 25000,
"nb_test_samples": 1000,
},
"mixing": {
        deterministic_synthesis=args.deterministic_synthesis,
    )
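+# Wall-clock time at which the previous epoch's results were produced
+# (None until the first epoch finishes); used below to predict the next one.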
+time_pred_result = None
+
for n_epoch in range(nb_epochs_finished, nb_epochs):
    learning_rate = learning_rate_schedule[n_epoch]
        deterministic_synthesis=args.deterministic_synthesis,
    )
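+    # Naive ETA: assume the coming epoch takes about as long as the one that
+    # just finished, and log the predicted time of the next results.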
+    time_current_result = datetime.datetime.now()
+    if time_pred_result is not None:
+        log_string(
+            f"next_result {time_current_result + (time_current_result - time_pred_result)}"
+        )
+    time_pred_result = time_current_result
+
    checkpoint = {
        "nb_epochs_finished": n_epoch + 1,
        "model_state": model.state_dict(),
from mygpt import BracketedSequence
-try:
-    from graph import save_attention_image
-except ImportError:
-    save_attention_image = None
+# from graph import save_attention_image
+save_attention_image = None
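+# While this stays None the attention-image dumps below are skipped
+# (graph.save_attention_image needs pycairo).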
######################################################################
logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
- if save_attention_image is None:
- logger("no save_attention_image (is pycairo installed?)")
- else:
+ if save_attention_image is not None:
for k in range(10):
ns = torch.randint(self.test_input.size(0), (1,)).item()
input = self.test_input[ns : ns + 1].clone()
logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
+        if n_epoch == 5 or n_epoch == 10 or n_epoch == 20:
+            if save_attention_image is None:
+                logger("no save_attention_image (is pycairo installed?)")
+            else:
+                for k in range(10):
+                    ns = k  # torch.randint(self.test_input.size(0), (1,)).item()
+                    input = self.test_input[ns : ns + 1].clone()
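+                    # Single forward pass in eval mode with attention recording
+                    # enabled; the training flag is restored afterwards.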
+                    with torch.autograd.no_grad():
+                        t = model.training
+                        model.eval()
+                        model.record_attention(True)
+                        model(BracketedSequence(input))
+                        model.train(t)
+                        ram = model.retrieve_attention()
+                        model.record_attention(False)
+
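+                    # Token labels for the plot: the input at position i is the
+                    # output at position i-1 (autoregressive shift), hence the "n/a".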
+                    tokens_output = [self.id2token[t.item()] for t in input[0]]
+                    tokens_input = ["n/a"] + tokens_output[:-1]
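+                    # One image per head; each entry of ram holds one layer's
+                    # attention, indexed (batch, head, ...).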
+                    for n_head in range(ram[0].size(1)):
+                        filename = os.path.join(
+                            result_dir,
+                            f"sandbox_attention_epoch_{n_epoch}_sample_{k}_head_{n_head}.pdf",
+                        )
+                        attention_matrices = [m[0, n_head] for m in ram]
+                        save_attention_image(
+                            filename,
+                            tokens_input,
+                            tokens_output,
+                            attention_matrices,
+                            k_top=10,
+                            # min_total_attention=0.9,
+                            token_gap=12,
+                            layer_gap=50,
+                        )
+                        logger(f"wrote {filename}")
+
######################################################################