X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=706e1d913c20be0bf9449f94bf3346658a5a1bdc;hb=8d2ebe29b48e3cf2f0a3937ab1e44d0e12a4924e;hp=8b57cb2056462d8265bb38c1dffb49c4d59dc41a;hpb=a92a5ca00f4277f7a133fa6cfaada2bc1981f524;p=picoclvr.git

diff --git a/tasks.py b/tasks.py
index 8b57cb2..706e1d9 100755
--- a/tasks.py
+++ b/tasks.py
@@ -64,41 +64,168 @@ class Task:
 class Problem:
-    def generate(nb):
+    def generate_sequences(self, nb):
         pass
 
-    def perf(seq, logger):
-        pass
+    def seq2str(self, seq):
+        return "[NOT IMPLEMENTED]"
+
+
+####################
+
+
+class ProblemLevel0(Problem):
+    def __init__(self, nb_sentences=100, len_prompt=5, len_result=5):
+        # A fixed pool of random digit sequences; token 10 separates the
+        # prompt from the result
+        self.seq = torch.randint(10, (nb_sentences, len_prompt + 1 + len_result))
+        self.seq[:, len_prompt] = 10
+
+    def generate_sequences(self, nb):
+        sequences = self.seq[torch.randint(self.seq.size(0), (nb,))]
+        # ar_mask is 1 strictly after the separator, 0 before and at it
+        ar_mask = (sequences == 10).long()
+        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
+        return sequences, ar_mask
+
+
+class ProblemLevel1(Problem):
+    def __init__(self, nb_operators=100, len_source=5, len_result=8):
+        self.len_source = len_source
+        self.len_result = len_result
+        # number of decimal digits used to encode the operator index
+        self.len_nb_operator = int(math.log(nb_operators) / math.log(10)) + 1
+        # each operator is a 0/1 matrix that picks one source digit per
+        # result position
+        self.operators = F.one_hot(
+            torch.rand(nb_operators, len_result, len_source).argmax(-1),
+            num_classes=len_source,
+        )
+
+    def generate_sequences(self, nb):
+        operator_indexes = torch.randint(self.operators.size(0), (nb,))
+        operators = self.operators[operator_indexes]
+        # the operator index, encoded as its decimal digits
+        operator_digits = (
+            operator_indexes[:, None]
+            // 10 ** torch.arange(self.len_nb_operator - 1, -1, -1)
+        ) % 10
+        marker1 = torch.full((nb, 1), 10)
+        source = torch.randint(10, (nb, self.len_source))
+        marker2 = torch.full((nb, 1), 11)
+        result = operators.bmm(source[:, :, None]).squeeze(-1)
+        sequences = torch.cat(
+            (operator_digits, marker1, source, marker2, result), 1
+        )
+        ar_mask = (sequences == 11).long()
+        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
+        return sequences, ar_mask
+
+    def seq2str(self, seq):
+        return "".join("0123456789|>"[x.item()] for x in seq)
+
+
+####################
+
+
+class ProblemAddition(Problem):
+    def __init__(self, nb_digits=10, zero_padded=False, inverted_result=False):
+        self.nb_digits = nb_digits
+        self.zero_padded = zero_padded
+        self.inverted_result = inverted_result
+        self.char2id = {c: n for n, c in enumerate("0123456789+=$")}
+        self.id2char = {n: c for c, n in self.char2id.items()}
+
+    def tensorize(self, strings):
+        # right-pad with "$" to the longest string in the batch
+        len_max = max(len(x) for x in strings)
+        return torch.tensor(
+            [
+                [self.char2id[c] for c in s + "$" * (len_max - len(s))]
+                for s in strings
+            ]
+        )
+
+    def generate_sequences(self, nb):
+        sequences = []
+        for k in range(nb):
+            a, b = torch.randint(10**self.nb_digits, (2,))
+            c = a + b
+            a, b, c = str(a.item()), str(b.item()), str(c.item())
+            if self.zero_padded:
+                a = "0" * (self.nb_digits - len(a)) + a
+                b = "0" * (self.nb_digits - len(b)) + b
+                c = "0" * (self.nb_digits + 1 - len(c)) + c
+            if self.inverted_result:
+                c = c[::-1]
+            sequences.append(f"{a}+{b}={c}$")
+
+        sequences = self.tensorize(sequences)
+        ar_mask = (sequences == self.char2id["="]).long()
+        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
+        return sequences, ar_mask
+
+    def seq2str(self, seq):
+        return "".join(self.id2char[x.item()] for x in seq)
+
+
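+# Illustrative usage sketch (not part of the original change; it assumes
+# only the classes defined above): generate a few additions and decode
+# them back to strings.
+#
+# problem = ProblemAddition(nb_digits=2)
+# sequences, ar_mask = problem.generate_sequences(4)
+# for s in sequences:
+#     print(problem.seq2str(s))  # e.g. "12+34=46$", "$"-padded to the batch max
+# print(ar_mask[0])  # 1 at every position strictly after the "="
+
+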
+# class ProblemUnion(Problem):
+#     problems = [ProblemByheart()]
+#     nb_common_codes = 100
+
+#     def generate_sequences(nb_samples):
+#         problem_indexes = torch.randint(len(problems), (nb_samples,))
+#         nb_samples_per_problem = F.one_hot(problem_indexes).sum(0)
+#         print(f"{nb_samples_per_problem}")
+#         all_seq = []
+#         for nb, p in zip(nb_samples_per_problem, problems):
+#             all_seq.append(p.generate_sequences(nb))
+#         return all_seq
+
+#     for strain, stest in zip(train_seq, test_seq):
+#         s = torch.cat((strain, stest), 0)
+
+####################
 
 
 class SandBox(Task):
     def __init__(
         self,
+        problem,
         nb_train_samples,
         nb_test_samples,
         batch_size,
         logger=None,
         device=torch.device("cpu"),
+        max_nb_codes=1024,
     ):
         super().__init__()
 
         self.batch_size = batch_size
+        self.device = device
+        self.problem = problem
 
-        def generate_sequences(nb_samples):
-            problem_indexes = torch.randint(len(problems), (nb_samples,))
-            nb_samples_per_problem = torch.one_hot(problem_indexes).sum(0)
-            print(f"{nb_samples_per_problem}")
+        self.train_input, self.train_ar_mask = self.problem.generate_sequences(
+            nb_train_samples
+        )
+        self.test_input, self.test_ar_mask = self.problem.generate_sequences(
+            nb_test_samples
+        )
 
-        self.train_input = generate_sequences(nb_train_samples)
-        self.test_input = generate_sequences(nb_test_samples)
+        self.train_input = self.train_input.to(device)
+        self.train_ar_mask = self.train_ar_mask.to(device)
+        self.test_input = self.test_input.to(device)
+        self.test_ar_mask = self.test_ar_mask.to(device)
 
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
+        # A bit of paranoia never hurts
+        assert (
+            self.nb_codes <= max_nb_codes
+            and self.train_input.min() >= 0
+            and self.test_input.min() >= 0
+            and tuple(self.train_ar_mask.unique()) == (0, 1)
+            and tuple(self.test_ar_mask.unique()) == (0, 1)
+        )
+
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
         input = self.train_input if split == "train" else self.test_input
@@ -115,12 +242,51 @@ class SandBox(Task):
         return self.nb_codes
 
     def produce_results(
-        self, n_epoch, model, result_dir, logger, deterministic_synthesis
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
     ):
-        # logger(
-        #     f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
-        # )
-        pass
+        def compute_accuracy(input, ar_mask, logger=None):
+            input, ar_mask = input[:nmax], ar_mask[:nmax]
+            # keep the prompt, erase the part to be generated
+            result = input.clone() * (1 - ar_mask)
+
+            masked_inplace_autoregression(
+                model,
+                self.batch_size,
+                result,
+                ar_mask,
+                deterministic_synthesis,
+                progress_bar_desc=None,
+                device=self.device,
+            )
+
+            if logger is not None:
+                for sp, st in zip(result[:10], input[:10]):
+                    logger(
+                        f"test_sequences {n_epoch} prediction   {self.problem.seq2str(sp)}"
+                    )
+                    logger(
+                        f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
+                    )
+
+            nb_total = ar_mask.sum().item()
+            nb_correct = ((result == input).long() * ar_mask).sum().item()
+
+            return nb_total, nb_correct
+
+        train_nb_total, train_nb_correct = compute_accuracy(
+            self.train_input, self.train_ar_mask
+        )
+
+        logger(
+            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+        )
+
+        test_nb_total, test_nb_correct = compute_accuracy(
+            self.test_input, self.test_ar_mask, logger
+        )
+
+        logger(
+            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+        )
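+
+    # Note (illustrative, not from the original change): compute_accuracy
+    # relies on the ar_mask convention used throughout this file, in which
+    # positions with ar_mask == 1 are erased and re-generated
+    # autoregressively while positions with ar_mask == 0 are kept as the
+    # prompt, e.g. for ProblemAddition:
+    #
+    #     sequence  1 2 + 3 4 = 4 6 $
+    #     ar_mask   0 0 0 0 0 0 1 1 1
+    #
+    # so result = input.clone() * (1 - ar_mask) zeroes exactly the tokens
+    # the model has to predict.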
 
 
 ######################################################################
 
@@ -1059,8 +1225,6 @@ class World(Task):
             device_storage=device_storage,
         )
 
-        print(f"{train_action_seq.size()=}")
-
         train_frame_seq = self.frame2seq(train_frames).to(device_storage)
         test_frame_seq = self.frame2seq(test_frames).to(device_storage)
 
@@ -1072,7 +1236,7 @@ class World(Task):
         self.nb_codes = nb_frame_codes + nb_action_codes
 
         train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
-        print(f"{train_action_seq.device=} {nb_frame_codes.device=}")
+
         train_action_seq += nb_frame_codes
         self.train_input = torch.cat(
             (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
@@ -1131,7 +1295,6 @@ class World(Task):
             (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1
         )
         result = result.reshape(-1, result.size(-1))
-        print(f"{result.size()=}")
 
         frames = self.seq2frame(result)
         image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
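+        # Illustrative note (assumption, not part of the original change):
+        # the `train_action_seq += nb_frame_codes` offset applied in
+        # __init__ shifts action tokens into the range
+        # [nb_frame_codes, nb_frame_codes + nb_action_codes), while frame
+        # tokens occupy [0, nb_frame_codes), so the two vocabularies never
+        # collide when frames and actions are concatenated into the single
+        # token sequence [frame_t | actions | frame_{t+1}].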