X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=problems.py;h=2c8602c6f8a67f91cd491772406805a1e4e96ae2;hb=6681907dcc86bf6e159925814d419f522e0e3300;hp=78bb64e601785bf2e116be86bd14faa4d7021696;hpb=db7cefe4fefb381e56f1292d5bbe4a18c76afb47;p=picoclvr.git

diff --git a/problems.py b/problems.py
index 78bb64e..2c8602c 100755
--- a/problems.py
+++ b/problems.py
@@ -21,8 +21,55 @@ class Problem:
 ####################
 
 
-class ProblemLevel0(Problem):
-    def __init__(self, nb_sentences=100, len_prompt=5, len_result=5):
+class ProblemTwoTargets(Problem):
+    def __init__(self, len_total=10, len_targets=3):
+        assert len_targets >= 3
+        assert len_total >= 3 * len_targets - 1
+        self.len_total = len_total
+        self.len_targets = len_targets
+
+    def generate_sequences(self, nb):
+        k = torch.arange(self.len_total)[None, :]
+        s = torch.randint(10, (nb, self.len_total))
+        l = torch.rand(nb, self.len_total)
+        l = l * (k <= self.len_total - self.len_targets).long()
+        k1 = l.argmax(dim=1, keepdim=True)
+        m = (k != k1).long() * (k != k1 + self.len_targets - 1).long()
+        s = s * m + 10 * (1 - m)
+        l = l * (
+            1
+            - (k + self.len_targets - 1 >= k1).long()
+            * (k < k1 + self.len_targets).long()
+        )
+        k2 = l.argmax(dim=1, keepdim=True)
+        m = (k != k2).long() * (k != k2 + self.len_targets - 1).long()
+        s = s * m + 11 * (1 - m)
+        a1 = s.gather(dim=1, index=k1 + 1 + torch.arange(self.len_targets - 2)[None, :])
+        a2 = s.gather(dim=1, index=k2 + 1 + torch.arange(self.len_targets - 2)[None, :])
+        sequences = torch.cat(
+            (
+                s,
+                torch.full((nb, 1), 12),
+                a1,
+                torch.full((nb, 1), 12),
+                a2,
+                torch.full((nb, 1), 12),
+            ),
+            1,
+        )
+        ar_mask = (sequences == 12).long()
+        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
+        return sequences, ar_mask
+
+    def seq2str(self, seq):
+        return "".join("0123456789-+|"[x.item()] for x in seq)
+
+
+####################
+
+
+class ProblemByHeart(Problem):
+    def __init__(self, nb_sentences=100, len_prompt=8, len_result=8):
         self.seq = torch.randint(10, (nb_sentences, len_prompt + 1 + len_result))
         self.seq[:, len_prompt] = 10
 
@@ -32,9 +79,15 @@ class ProblemLevel0(Problem):
         ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
         return sequences, ar_mask
 
+    def seq2str(self, seq):
+        return "".join("0123456789|"[x.item()] for x in seq)
+
+
+####################
+
 
-class ProblemLevel1(Problem):
-    def __init__(self, nb_operators=100, len_source=5, len_result=8):
+class ProblemLearnOperator(Problem):
+    def __init__(self, nb_operators=100, len_source=6, len_result=9):
         self.len_source = len_source
         self.len_result = len_result
         self.len_nb_operator = int(math.log(nb_operators) / math.log(10)) + 1
@@ -51,7 +104,6 @@ class ProblemLevel1(Problem):
             // 10 ** torch.arange(self.len_nb_operator - 1, -1, -1)
         ) % 10
         marker1 = torch.full((nb, 1), 10)
-        # source = torch.randint(10, (nb, self.len_source))
         source = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
         marker2 = torch.full((nb, 1), 11)
         result = operators.bmm(source[:, :, None]).squeeze(-1)
@@ -64,7 +116,10 @@ class ProblemLevel1(Problem):
         return "".join("0123456789|>"[x.item()] for x in seq)
 
 
-class ProblemLevel2(Problem):
+####################
+
+
+class ProblemGuessOperator(Problem):
     def __init__(self, len_source=5, len_result=8):
         self.len_source = len_source
         self.len_result = len_result
@@ -141,19 +196,8 @@ class ProblemAddition(Problem):
         return "".join(self.id2char[x.item()] for x in seq)
 
 
-# class ProblemUnion(Problem):
-#     problems = [ProblemByheart()]
-#     nb_common_codes = 100
-
-#     def generate_sequences(nb_samples):
-#         problem_indexes = torch.randint(len(problems), (nb_samples,))
-#         nb_samples_per_problem = torch.one_hot(problem_indexes).sum(0)
-#         print(f"{nb_samples_per_problem}")
-#         all_seq = []
-#         for nb, p in zip(nb_samples_per_problem, problems):
-#             all_seq.append(p.generate_sequences(nb_samples_per_problem[nb]))
-#         return all_seq
-
-#     for strain, stest in zip(train_seq, test_seq):
-#         s = torch.cat((strain, stest), 0)
-
+if __name__ == "__main__":
+    p = ProblemTwoTargets(12, 4)
+    s, m = p.generate_sequences(10)
+    for x in s:
+        print(p.seq2str(x))
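As a reading aid, not part of the patch: a minimal sketch of how the new ProblemTwoTargets class could be exercised, assuming problems.py is importable and torch is installed. It mirrors the __main__ block added at the end of the file and additionally prints which positions the returned ar_mask marks for prediction (everything after the first '|'). The sample output in the comments is illustrative only, since digits and marker positions are drawn at random.

from problems import ProblemTwoTargets

# Same configuration as the new __main__ block: 12 input tokens, each target
# delimited by a marker pair 4 positions apart, i.e. 2 digits per target.
p = ProblemTwoTargets(len_total=12, len_targets=4)

sequences, ar_mask = p.generate_sequences(3)

for s, m in zip(sequences, ar_mask):
    # seq2str maps tokens 10, 11, 12 to '-', '+', '|': the input digits with the
    # two delimited targets, then the two targets copied out after the first '|'.
    print(p.seq2str(s))
    # '^' under the positions the model is expected to fill in (ar_mask == 1).
    print("".join("^" if b else " " for b in m.tolist()))

# Possible output for one sequence (random, so it varies from run to run):
#   0-58-41+27+9|58|27|
#                ^^^^^^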