diff --git a/tasks.py b/tasks.py
index eef84af..73f61bf 100755
--- a/tasks.py
+++ b/tasks.py
@@ -67,36 +67,152 @@ class Problem:
     def generate_sequences(self, nb):
         pass
 
-    def log_performance(self, sequences, logger):
-        pass
+    def seq2str(self, seq):
+        return "[NOT IMPLEMENTED]"
+
+####################
 
-class ProblemByheart(Problem):
-    def __init__(self):
-        nb_seq, len_prompt, len_result = 100, 5, 5
+
+class ProblemLevel0(Problem):
+    def __init__(self, nb_sentences=100, len_prompt=5, len_result=5):
-        self.seq = torch.randint(10, (nb_seq, len_prompt + 1 + len_result))
+        self.seq = torch.randint(10, (nb_sentences, len_prompt + 1 + len_result))
         self.seq[:, len_prompt] = 10
 
     def generate_sequences(self, nb):
         sequences = self.seq[torch.randint(self.seq.size(0), (nb,))]
-        ar_mask = (sequences==10).long()
+        ar_mask = (sequences == 10).long()
+        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
+        return sequences, ar_mask
+
+
+class ProblemLevel1(Problem):
+    def __init__(self, nb_operators=100, len_source=5, len_result=8):
+        self.len_source = len_source
+        self.len_result = len_result
+        self.len_nb_operator = int(math.log(nb_operators) / math.log(10)) + 1
+        self.operators = F.one_hot(
+            torch.rand(nb_operators, len_result, len_source).argmax(-1),
+            num_classes=len_source,
+        )
+
+    def generate_sequences(self, nb):
+        nb_operators = torch.randint(self.operators.size(0), (nb,))
+        operators = self.operators[nb_operators]
+        nb_operators = (
+            nb_operators[:, None]
+            // 10 ** torch.arange(self.len_nb_operator - 1, -1, -1)
+        ) % 10
+        marker1 = torch.full((nb, 1), 10)
+        source = torch.randint(10, (nb, self.len_source))
+        marker2 = torch.full((nb, 1), 11)
+        result = operators.bmm(source[:, :, None]).squeeze(-1)
+        print(f"{nb_operators.dtype=} {marker1.dtype=}")
+        sequences = torch.cat((nb_operators, marker1, source, marker2, result), 1)
+        print(f"{sequences.size()=}")
+        ar_mask = (sequences == 11).long()
+        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
+        return sequences, ar_mask
+
+    def seq2str(self, seq):
+        return "".join("0123456789|>"[x.item()] for x in seq)
+
+
+class ProblemLevel2(Problem):
+    def __init__(self, len_source=5, len_result=8):
+        self.len_source = len_source
+        self.len_result = len_result
+
+    def generate_sequences(self, nb):
+        operators = F.one_hot(
+            torch.rand(nb, self.len_result, self.len_source).argmax(-1),
+            num_classes=self.len_source,
+        )
+        source1 = torch.randint(10, (nb, self.len_source))
+        marker1 = torch.full((nb, 1), 10)
+        result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
+        marker2 = torch.full((nb, 1), 11)
+        source2 = torch.randint(10, (nb, self.len_source))
+        marker3 = torch.full((nb, 1), 12)
+        result2 = operators.bmm(source2[:, :, None]).squeeze(-1)
+
+        sequences = torch.cat(
+            (source1, marker1, result1, marker2, source2, marker3, result2), 1
+        )
+        ar_mask = (sequences == 12).long()
+        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
+        return sequences, ar_mask
+
+    def seq2str(self, seq):
+        return "".join("0123456789>|~"[x.item()] for x in seq)
+
+
+####################
+
+
+class ProblemAddition(Problem):
+    def __init__(self, nb_digits=10, zero_padded=False, inverted_result=False):
+        self.nb_digits = nb_digits
+        self.zero_padded = zero_padded
+        self.inverted_result = inverted_result
+        self.char2id = dict([(c, n) for n, c in enumerate("0123456789+=$")])
+        self.id2char = dict([(n, c) for c, n in self.char2id.items()])
+
+    def tensorize(self, strings):
+        len_max = max([len(x) for x in strings])
+        return torch.cat(
+            [
+                torch.tensor(
+                    [
+                        [self.char2id[c] for c in s + "$" * (len_max - len(s))]
+                        for s in strings
+                    ]
+                )
+            ],
+            0,
+        )
+
+    def generate_sequences(self, nb):
+        sequences = []
+        for k in range(nb):
+            a, b = torch.randint(10**self.nb_digits, (2,))
+            c = a + b
+            a, b, c = str(a.item()), str(b.item()), str(c.item())
+            if self.zero_padded:
+                a = "0" * (self.nb_digits - len(a)) + a
+                b = "0" * (self.nb_digits - len(b)) + b
+                c = "0" * (self.nb_digits + 1 - len(c)) + c
+            if self.inverted_result:
+                c = c[::-1]
+            sequences.append(f"{a}+{b}={c}$")
+
+        sequences = self.tensorize(sequences)
+        ar_mask = (sequences == self.char2id["="]).long()
         ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
         return sequences, ar_mask
 
-    # problems = [ProblemByheart()]
-    # nb_common_codes = 100
+    def seq2str(self, seq):
+        return "".join(self.id2char[x.item()] for x in seq)
+
+
+# class ProblemUnion(Problem):
+#     problems = [ProblemByheart()]
+#     nb_common_codes = 100
 
-    # def generate_sequences(nb_samples):
-    #     problem_indexes = torch.randint(len(problems), (nb_samples,))
-    #     nb_samples_per_problem = torch.one_hot(problem_indexes).sum(0)
-    #     print(f"{nb_samples_per_problem}")
-    #     all_seq = []
-    #     for nb, p in zip(nb_samples_per_problem, problems):
-    #         all_seq.append(p.generate_sequences(nb_samples_per_problem[nb]))
-    #     return all_seq
+#     def generate_sequences(nb_samples):
+#         problem_indexes = torch.randint(len(problems), (nb_samples,))
+#         nb_samples_per_problem = torch.one_hot(problem_indexes).sum(0)
+#         print(f"{nb_samples_per_problem}")
+#         all_seq = []
+#         for nb, p in zip(nb_samples_per_problem, problems):
+#             all_seq.append(p.generate_sequences(nb_samples_per_problem[nb]))
+#         return all_seq
+
+#     for strain, stest in zip(train_seq, test_seq):
+#         s = torch.cat((strain, stest), 0)
+
+####################
 
-    # for strain, stest in zip(train_seq, test_seq):
-    #     s = torch.cat((strain, stest), 0)
 
 class SandBox(Task):
     def __init__(
@@ -107,17 +223,39 @@ class SandBox(Task):
         batch_size,
         logger=None,
         device=torch.device("cpu"),
+        max_nb_codes=1024,
     ):
         super().__init__()
 
         self.batch_size = batch_size
         self.device = device
+        self.problem = problem
 
-        self.train_input, self.train_ar_mask = problem.generate_sequences(nb_train_samples)
-        self.test_input, self.test_ar_mask = problem.generate_sequences(nb_test_samples)
+        self.train_input, self.train_ar_mask = self.problem.generate_sequences(
+            nb_train_samples
+        )
+        self.test_input, self.test_ar_mask = self.problem.generate_sequences(
+            nb_test_samples
+        )
+
+        self.train_input, self.train_ar_mask = self.train_input.to(
+            device
+        ), self.train_ar_mask.to(device)
+        self.test_input, self.test_ar_mask = self.test_input.to(
+            device
+        ), self.test_ar_mask.to(device)
 
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
+        # A bit of paranoia never hurts
+        assert (
+            self.nb_codes <= max_nb_codes
+            and self.train_input.min() >= 0
+            and self.test_input.min() >= 0
+            and tuple(self.train_ar_mask.unique()) == (0, 1)
+            and tuple(self.test_ar_mask.unique()) == (0, 1)
+        )
+
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
         input = self.train_input if split == "train" else self.test_input
@@ -134,11 +272,12 @@ class SandBox(Task):
         return self.nb_codes
 
     def produce_results(
-        self, n_epoch, model, result_dir, logger, deterministic_synthesis
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
     ):
+        def compute_accuracy(input, ar_mask, logger=None):
+            input, ar_mask = input[:nmax], ar_mask[:nmax]
+            result = input.clone() * (1 - ar_mask)
 
-        def compute_accuracy(input, ar_mask):
-            result = input.clone() * (1-ar_mask)
             masked_inplace_autoregression(
                 model,
                 self.batch_size,
@@ -149,23 +288,37 @@ class SandBox(Task):
                 device=self.device,
             )
 
+            if logger is not None:
+                for sp, st in zip(result[:10], input[:10]):
+                    logger(
+                        f"test_sequences {n_epoch} prediction   {self.problem.seq2str(sp)}"
+                    )
+                    logger(
+                        f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
+                    )
+
             nb_total = ar_mask.sum().item()
-            nb_correct = ((result==input).long() * ar_mask).sum().item()
+            nb_correct = ((result == input).long() * ar_mask).sum().item()
 
             return nb_total, nb_correct
 
-        train_nb_total, train_nb_correct = compute_accuracy(self.train_input, self.train_ar_mask)
+        train_nb_total, train_nb_correct = compute_accuracy(
+            self.train_input, self.train_ar_mask
+        )
 
         logger(
             f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
         )
 
-        test_nb_total, test_nb_correct = compute_accuracy(self.test_input, self.test_ar_mask)
+        test_nb_total, test_nb_correct = compute_accuracy(
+            self.test_input, self.test_ar_mask, logger
+        )
 
         logger(
             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )
 
+
 ######################################################################
 
 import picoclvr
 
@@ -1102,8 +1255,6 @@ class World(Task):
             device_storage=device_storage,
         )
 
-        print(f"{train_action_seq.size()=}")
-
         train_frame_seq = self.frame2seq(train_frames).to(device_storage)
         test_frame_seq = self.frame2seq(test_frames).to(device_storage)
 
@@ -1115,7 +1266,7 @@ class World(Task):
         self.nb_codes = nb_frame_codes + nb_action_codes
 
         train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
-        print(f"{train_action_seq.device=} {nb_frame_codes.device=}")
+
         train_action_seq += nb_frame_codes
         self.train_input = torch.cat(
             (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
@@ -1174,7 +1325,6 @@ class World(Task):
             (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1
         )
        result = result.reshape(-1, result.size(-1))
-        print(f"{result.size()=}")
 
         frames = self.seq2frame(result)
         image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
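
A note on the ar_mask idiom shared by every Problem class in this patch:
(sequences == marker).long() flags the marker token, and
(ar_mask.cumsum(1) - ar_mask).clamp(max=1) turns that flag into a mask that is
1 strictly after the first marker, i.e. exactly the positions the model has to
generate autoregressively. A minimal standalone check, not part of the patch:

    import torch

    # One sequence with the marker token 10 at position 3.
    sequences = torch.tensor([[3, 1, 4, 10, 5, 9, 2]])
    ar_mask = (sequences == 10).long()  # [[0, 0, 0, 1, 0, 0, 0]]
    ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
    print(ar_mask)  # tensor([[0, 0, 0, 0, 1, 1, 1]])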
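The "operators" of ProblemLevel1 and ProblemLevel2 are batches of one-hot
(len_result, len_source) matrices, so operators.bmm(source[:, :, None]).squeeze(-1)
makes each result digit a copy of one randomly chosen source digit. The same
computation for a single operator, with illustrative names that are not from
the patch:

    import torch
    import torch.nn.functional as F

    len_source, len_result = 5, 8
    # Row i of op is one-hot: it selects which source position result[i] copies.
    op = F.one_hot(
        torch.rand(len_result, len_source).argmax(-1), num_classes=len_source
    )
    source = torch.randint(10, (len_source,))
    result = op @ source  # result[i] == source[op[i].argmax()]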
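Finally, a usage sketch for ProblemAddition, assuming tasks.py and its
dependencies are importable; the actual call site (presumably in main.py) may
differ:

    from tasks import ProblemAddition

    p = ProblemAddition(nb_digits=3, zero_padded=True)
    sequences, ar_mask = p.generate_sequences(4)
    for s in sequences:
        print(p.seq2str(s))  # e.g. "123+045=0168$"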