X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=421aee49f2f0005f7650ea9836fde801fc5598e8;hb=db7cefe4fefb381e56f1292d5bbe4a18c76afb47;hp=0c92af923dd2c9b8bfd7785a67556de402b04809;hpb=3b9ba21fd3d06a20703216cc0a77fe9dc78b079f;p=picoclvr.git

diff --git a/tasks.py b/tasks.py
index 0c92af9..421aee4 100755
--- a/tasks.py
+++ b/tasks.py
@@ -72,159 +72,9 @@ class Task:
         pass
 
-######################################################################
-
-
-class Problem:
-    def generate_sequences(self, nb):
-        pass
-
-    def seq2str(self, seq):
-        return "[NOT IMPLEMENTED]"
-
-
-####################
-
-
-class ProblemLevel0(Problem):
-    def __init__(self, nb_sentences=100, len_prompt=5, len_result=5):
-        self.seq = torch.randint(10, (nb_sentences, len_prompt + 1 + len_result))
-        self.seq[:, len_prompt] = 10
-
-    def generate_sequences(self, nb):
-        sequences = self.seq[torch.randint(self.seq.size(0), (nb,))]
-        ar_mask = (sequences == 10).long()
-        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
-        return sequences, ar_mask
-
-
-class ProblemLevel1(Problem):
-    def __init__(self, nb_operators=100, len_source=5, len_result=8):
-        self.len_source = len_source
-        self.len_result = len_result
-        self.len_nb_operator = int(math.log(nb_operators) / math.log(10)) + 1
-        self.operators = F.one_hot(
-            torch.rand(nb_operators, len_result, len_source).argmax(-1),
-            num_classes=len_source,
-        )
-
-    def generate_sequences(self, nb):
-        nb_operators = torch.randint(self.operators.size(0), (nb,))
-        operators = self.operators[nb_operators]
-        nb_operators = (
-            nb_operators[:, None]
-            // 10 ** torch.arange(self.len_nb_operator - 1, -1, -1)
-        ) % 10
-        marker1 = torch.full((nb, 1), 10)
-        # source = torch.randint(10, (nb, self.len_source))
-        source = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
-        marker2 = torch.full((nb, 1), 11)
-        result = operators.bmm(source[:, :, None]).squeeze(-1)
-        sequences = torch.cat((nb_operators, marker1, source, marker2, result), 1)
-        ar_mask = (sequences == 11).long()
-        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
-        return sequences, ar_mask
-
-    def seq2str(self, seq):
-        return "".join("0123456789|>"[x.item()] for x in seq)
-
-
-class ProblemLevel2(Problem):
-    def __init__(self, len_source=5, len_result=8):
-        self.len_source = len_source
-        self.len_result = len_result
-
-    def generate_sequences(self, nb):
-        operators = F.one_hot(
-            torch.rand(nb, self.len_result, self.len_source).argmax(-1),
-            num_classes=self.len_source,
-        )
-        source1 = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
-        # source1 = torch.randint(10, (nb, self.len_source))
-        marker1 = torch.full((nb, 1), 10)
-        result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
-        marker2 = torch.full((nb, 1), 11)
-        source2 = torch.randint(10, (nb, self.len_source))
-        marker3 = torch.full((nb, 1), 12)
-        result2 = operators.bmm(source2[:, :, None]).squeeze(-1)
-
-        sequences = torch.cat(
-            (source1, marker1, result1, marker2, source2, marker3, result2), 1
-        )
-        ar_mask = (sequences == 12).long()
-        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
-        return sequences, ar_mask
-
-    def seq2str(self, seq):
-        return "".join("0123456789>|~"[x.item()] for x in seq)
-
-
-####################
-
-
-class ProblemAddition(Problem):
-    def __init__(self, nb_digits=10, zero_padded=False, inverted_result=False):
-        self.nb_digits = nb_digits
-        self.zero_padded = zero_padded
-        self.inverted_result = inverted_result
-        self.char2id = dict([(c, n) for n, c in enumerate("0123456789+=$")])
-        self.id2char = dict([(n, c) for c, n in self.char2id.items()])
-
-    def tensorize(self, strings):
-        len_max = max([len(x) for x in strings])
-        return torch.cat(
-            [
-                torch.tensor(
-                    [
-                        [self.char2id[c] for c in s + "$" * (len_max - len(s))]
-                        for s in strings
-                    ]
-                )
-            ],
-            0,
-        )
-
-    def generate_sequences(self, nb):
-        sequences = []
-        for k in range(nb):
-            a, b = torch.randint(10**self.nb_digits, (2,))
-            c = a + b
-            a, b, c = str(a.item()), str(b.item()), str(c.item())
-            if self.zero_padded:
-                a = "0" * (self.nb_digits - len(a)) + a
-                b = "0" * (self.nb_digits - len(b)) + b
-                c = "0" * (self.nb_digits + 1 - len(c)) + c
-            if self.inverted_result:
-                c = c[::-1]
-            sequences.append(f"{a}+{b}={c}$")
-
-        sequences = self.tensorize(sequences)
-        ar_mask = (sequences == self.char2id["="]).long()
-        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
-        return sequences, ar_mask
-
-    def seq2str(self, seq):
-        return "".join(self.id2char[x.item()] for x in seq)
-
-
-# class ProblemUnion(Problem):
-#     problems = [ProblemByheart()]
-#     nb_common_codes = 100
-
-#     def generate_sequences(nb_samples):
-#         problem_indexes = torch.randint(len(problems), (nb_samples,))
-#         nb_samples_per_problem = torch.one_hot(problem_indexes).sum(0)
-#         print(f"{nb_samples_per_problem}")
-#         all_seq = []
-#         for nb, p in zip(nb_samples_per_problem, problems):
-#             all_seq.append(p.generate_sequences(nb_samples_per_problem[nb]))
-#         return all_seq
-
-#     for strain, stest in zip(train_seq, test_seq):
-#         s = torch.cat((strain, stest), 0)
-
 ####################
 
+import problems
 
 
 class SandBox(Task):
     def __init__(
@@ -1284,44 +1134,36 @@ class RPL(Task):
         )
 
         if save_attention_image is not None:
-            input = self.test_input[:1]
-            result = input.clone()
-            s = (result == self.t_prog).long()
-            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
-            result = (1 - ar_mask) * result + ar_mask * self.t_nul
-
-            masked_inplace_autoregression(
-                model,
-                self.batch_size,
-                result,
-                ar_mask,
-                deterministic_synthesis,
-                device=self.device,
-            )
+            ns = torch.randint(self.test_input.size(0), (1,)).item()
+            input = self.test_input[ns : ns + 1].clone()
+            last = (input != self.t_nul).max(0).values.nonzero().max() + 3
+            input = input[:, :last].to(self.device)
 
             with torch.autograd.no_grad():
                 t = model.training
                 model.eval()
                 model.record_attention(True)
-                model(BracketedSequence(result))
+                model(BracketedSequence(input))
                 model.train(t)
                 ram = model.retrieve_attention()
                 model.record_attention(False)
 
-            tokens_output = [self.id2token[i.item()] for i in result[0]]
+            tokens_output = [self.id2token[i.item()] for i in input[0]]
             tokens_input = ["n/a"] + tokens_output[:-1]
 
             for n_head in range(ram[0].size(1)):
-                filename = f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+                filename = os.path.join(
+                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+                )
                 attention_matrices = [m[0, n_head] for m in ram]
                 save_attention_image(
                     filename,
                     tokens_input,
                     tokens_output,
                     attention_matrices,
-                    token_gap=12,
-                    layer_gap=50,
                     k_top=10,
                     # min_total_attention=0.9,
+                    token_gap=12,
+                    layer_gap=50,
                 )
                 logger(f"wrote {filename}")
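
Editorial note (a sketch for the reader, not part of the commit above): every `Problem.generate_sequences` in the removed block builds `ar_mask` the same way, producing a mask that is 0 over the prompt and 1 strictly after the first marker token, so that only the "result" part of each sequence is regenerated autoregressively (as done by `masked_inplace_autoregression` in the second hunk). A minimal self-contained illustration, assuming marker id 10 as in `ProblemLevel0`:

import torch

# One sequence per row; token 10 separates the prompt from the result.
sequences = torch.tensor([[3, 1, 4, 10, 5, 9, 2]])

s = (sequences == 10).long()  # 1 exactly at marker positions
# cumsum marks everything at or after a marker; subtracting s excludes the
# marker itself, and clamp caps the count if the marker occurs more than once.
ar_mask = (s.cumsum(1) - s).clamp(max=1)

print(ar_mask)  # tensor([[0, 0, 0, 0, 1, 1, 1]])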