X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=332d6c5a93f4768dfd3e409389887baf3f8d2601;hb=ead4b8e4edd29578c01501d168e416b47fa4047b;hp=82d965b040becefc4f4933e055fc9e19d3a6976e;hpb=994d2408781ebaed6da16b10b2b3ebedeff82756;p=picoclvr.git diff --git a/tasks.py b/tasks.py index 82d965b..332d6c5 100755 --- a/tasks.py +++ b/tasks.py @@ -20,6 +20,8 @@ def masked_inplace_autoregression( progress_bar_desc="autoregression", device=torch.device("cpu"), ): + assert input.size() == ar_mask.size() + batches = zip(input.split(batch_size), ar_mask.split(batch_size)) if progress_bar_desc is not None: @@ -27,7 +29,7 @@ def masked_inplace_autoregression( batches, dynamic_ncols=True, desc=progress_bar_desc, - total=input.size(0) // batch_size, + # total=input.size(0) // batch_size, ) with torch.autograd.no_grad(): @@ -58,6 +60,236 @@ class Task: pass +###################################################################### + + +class Problem: + def generate_sequences(self, nb): + pass + + def seq2str(self, seq): + return "[NOT IMPLEMENTED]" + + +#################### + + +class ProblemLevel0(Problem): + def __init__(self, nb_sentences=100, len_prompt=5, len_result=5): + self.seq = torch.randint(10, (nb_seq, len_prompt + 1 + len_result)) + self.seq[:, len_prompt] = 10 + + def generate_sequences(self, nb): + sequences = self.seq[torch.randint(self.seq.size(0), (nb,))] + ar_mask = (sequences == 10).long() + ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1) + return sequences, ar_mask + + +class ProblemLevel1(Problem): + def __init__(self, nb_operators=100, len_prompt=5, len_result=8): + self.len_prompt = len_prompt + self.len_result = len_result + self.len_nb_operator = int(math.log(nb_operators) / math.log(10)) + 1 + self.operators = F.one_hot( + torch.rand(nb_operators, len_result, len_prompt).argmax(-1), + num_classes=len_prompt, + ) + + def generate_sequences(self, nb): + a = self.len_nb_operator + b = a + 1 + self.len_prompt + sequences = torch.empty(nb, b + 1 + self.len_result, dtype=torch.int64) + nb_operators = torch.randint(self.operators.size(0), (nb,)) + sequences[:, :a] = (nb_operators[:, None] / 10 ** torch.arange(a)) % 10 + sequences[:, a] = 10 + sequences[:, a + 1 : b] = torch.randint(10, (nb, b - a - 1)) + sequences[:, b] = 11 + + o = self.operators[nb_operators] + p = sequences[:, a + 1 : b] + print(f"{o.size()=} {p.size()=} {sequences[:,b+1:].size()=}") + sequences[:, b + 1 :] = o.bmm(p[:, :, None]).squeeze(-1) + ar_mask = (sequences == 11).long() + ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1) + return sequences, ar_mask + + def seq2str(self, seq): + return "".join(self.id2char[x.item()] for x in seq) + + +#################### + + +class ProblemAddition(Problem): + def __init__(self, nb_digits=10, zero_padded=False, inverted_result=False): + self.nb_digits = nb_digits + self.zero_padded = zero_padded + self.inverted_result = inverted_result + self.char2id = dict([(c, n) for n, c in enumerate("0123456789+=$")]) + self.id2char = dict([(n, c) for c, n in self.char2id.items()]) + + def tensorize(self, strings): + len_max = max([len(x) for x in strings]) + return torch.cat( + [ + torch.tensor( + [ + [self.char2id[c] for c in s + "$" * (len_max - len(s))] + for s in strings + ] + ) + ], + 0, + ) + + def generate_sequences(self, nb): + sequences = [] + for k in range(nb): + a, b = torch.randint(10**self.nb_digits, (2,)) + c = a + b + a, b, c = str(a.item()), str(b.item()), str(c.item()) + if self.zero_padded: + a = "0" * (self.nb_digits - len(a)) + a + b 
= "0" * (self.nb_digits - len(b)) + b + c = "0" * (self.nb_digits + 1 - len(c)) + c + if self.inverted_result: + c = c[::-1] + sequences.append(f"{a}+{b}={c}$") + + sequences = self.tensorize(sequences) + ar_mask = (sequences == self.char2id["="]).long() + ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1) + return sequences, ar_mask + + def seq2str(self, seq): + return "".join(self.id2char[x.item()] for x in seq) + + +# class ProblemUnion(Problem): +# problems = [ProblemByheart()] +# nb_common_codes = 100 + +# def generate_sequences(nb_samples): +# problem_indexes = torch.randint(len(problems), (nb_samples,)) +# nb_samples_per_problem = torch.one_hot(problem_indexes).sum(0) +# print(f"{nb_samples_per_problem}") +# all_seq = [] +# for nb, p in zip(nb_samples_per_problem, problems): +# all_seq.append(p.generate_sequences(nb_samples_per_problem[nb])) +# return all_seq + +# for strain, stest in zip(train_seq, test_seq): +# s = torch.cat((strain, stest), 0) + +#################### + + +class SandBox(Task): + def __init__( + self, + problem, + nb_train_samples, + nb_test_samples, + batch_size, + logger=None, + device=torch.device("cpu"), + max_nb_codes=1024, + ): + super().__init__() + + self.batch_size = batch_size + self.device = device + self.problem = problem + + self.train_input, self.train_ar_mask = self.problem.generate_sequences( + nb_train_samples + ) + self.test_input, self.test_ar_mask = self.problem.generate_sequences( + nb_test_samples + ) + + self.train_input, self.train_ar_mask = self.train_input.to( + device + ), self.train_ar_mask.to(device) + self.test_input, self.test_ar_mask = self.test_input.to( + device + ), self.test_ar_mask.to(device) + + self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1 + + # A bit of paranoia never hurts + assert ( + self.nb_codes <= max_nb_codes + and self.train_input.min() >= 0 + and self.test_input.min() >= 0 + and tuple(self.train_ar_mask.unique()) == (0, 1) + and tuple(self.test_ar_mask.unique()) == (0, 1) + ) + + def batches(self, split="train", nb_to_use=-1, desc=None): + assert split in {"train", "test"} + input = self.train_input if split == "train" else self.test_input + if nb_to_use > 0: + input = input[:nb_to_use] + if desc is None: + desc = f"epoch-{split}" + for batch in tqdm.tqdm( + input.split(self.batch_size), dynamic_ncols=True, desc=desc + ): + yield batch + + def vocabulary_size(self): + return self.nb_codes + + def produce_results( + self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000 + ): + def compute_accuracy(input, ar_mask, logger=None): + input, ar_mask = input[:nmax], ar_mask[:nmax] + result = input.clone() * (1 - ar_mask) + + masked_inplace_autoregression( + model, + self.batch_size, + result, + ar_mask, + deterministic_synthesis, + progress_bar_desc=None, + device=self.device, + ) + + if logger is not None: + for sp, st in zip(result[:10], input[:10]): + logger( + f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}" + ) + logger( + f" {n_epoch} ground truth {self.problem.seq2str(st)}" + ) + + nb_total = ar_mask.sum().item() + nb_correct = ((result == input).long() * ar_mask).sum().item() + + return nb_total, nb_correct + + train_nb_total, train_nb_correct = compute_accuracy( + self.train_input, self.train_ar_mask + ) + + logger( + f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%" + ) + + test_nb_total, test_nb_correct = compute_accuracy( + self.test_input, self.test_ar_mask, 
logger + ) + + logger( + f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%" + ) + + ###################################################################### import picoclvr @@ -106,6 +338,8 @@ class PicoCLVR(Task): pruner_train=None, pruner_eval=None, ): + super().__init__() + def generate_descr(nb, cache_suffix, pruner): return picoclvr.generate( nb, @@ -294,6 +528,8 @@ class MNIST(Task): def __init__( self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu") ): + super().__init__() + self.nb_train_samples = (nb_train_samples,) self.nb_test_samples = (nb_test_samples,) self.batch_size = batch_size @@ -364,6 +600,8 @@ class Maze(Task): nb_walls, device=torch.device("cpu"), ): + super().__init__() + self.batch_size = batch_size self.height = height self.width = width @@ -535,6 +773,8 @@ class Snake(Task): prompt_length, device=torch.device("cpu"), ): + super().__init__() + self.batch_size = batch_size self.height = height self.width = width @@ -590,8 +830,6 @@ class Snake(Task): ) result *= 1 - ar_mask - # snake.solver(result,ar_mask) - masked_inplace_autoregression( model, self.batch_size, @@ -605,19 +843,8 @@ class Snake(Task): nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum() - # nb_total = result.size(0) - # nb_correct = ((result - input).abs().sum(1) == 0).sum() - return nb_total, nb_correct - # train_nb_total, train_nb_correct = compute_nb_correct( - # self.train_input, self.train_prior_visits - # ) - - # logger( - # f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%" - # ) - test_nb_total, test_nb_correct = compute_nb_correct( self.test_input[:1000], self.test_prior_visits[:1000] ) @@ -646,6 +873,8 @@ class Stack(Task): fraction_values_for_train=None, device=torch.device("cpu"), ): + super().__init__() + self.batch_size = batch_size self.nb_steps = nb_steps self.nb_stacks = nb_stacks @@ -793,6 +1022,8 @@ class Expr(Task): batch_size, device=torch.device("cpu"), ): + super().__init__() + self.batch_size = batch_size self.device = device @@ -880,17 +1111,22 @@ class Expr(Task): values_input = expr.extract_results([self.seq2str(s) for s in input]) values_result = expr.extract_results([self.seq2str(s) for s in result]) - for i, r in zip(values_input, values_result): - for n, vi in i.items(): - vr = r.get(n) - if vr is None or vr < 0: - nb_missed += 1 - else: - d = abs(vr - vi) - if d >= nb_delta.size(0): + filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt") + + with open(filename, "w") as f: + for i, r in zip(values_input, values_result): + for n, vi in i.items(): + vr = r.get(n) + f.write(f"{vi} {-1 if vr is None else vr}\n") + + if vr is None or vr < 0: nb_missed += 1 else: - nb_delta[d] += 1 + d = abs(vr - vi) + if d >= nb_delta.size(0): + nb_missed += 1 + else: + nb_delta[d] += 1 ###################################################################### @@ -952,3 +1188,125 @@ class Expr(Task): ###################################################################### + +import world + + +class World(Task): + def __init__( + self, + nb_train_samples, + nb_test_samples, + batch_size, + vqae_nb_epochs, + logger=None, + device=torch.device("cpu"), + device_storage=torch.device("cpu"), + ): + super().__init__() + + self.batch_size = batch_size + self.device = device + + ( + train_frames, + train_action_seq, + test_frames, + test_action_seq, + self.frame2seq, + 
self.seq2frame, + ) = world.create_data_and_processors( + nb_train_samples, + nb_test_samples, + mode="first_last", + nb_steps=30, + nb_epochs=vqae_nb_epochs, + logger=logger, + device=device, + device_storage=device_storage, + ) + + train_frame_seq = self.frame2seq(train_frames).to(device_storage) + test_frame_seq = self.frame2seq(test_frames).to(device_storage) + + nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1 + nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1 + + self.len_frame_seq = train_frame_seq.size(1) + self.len_action_seq = train_action_seq.size(1) + self.nb_codes = nb_frame_codes + nb_action_codes + + train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1) + + train_action_seq += nb_frame_codes + self.train_input = torch.cat( + (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1 + ) + + test_frame_seq = test_frame_seq.reshape(test_frame_seq.size(0) // 2, 2, -1) + test_action_seq += nb_frame_codes + self.test_input = torch.cat( + (test_frame_seq[:, 0, :], test_action_seq, test_frame_seq[:, 1, :]), 1 + ) + + def batches(self, split="train", nb_to_use=-1, desc=None): + assert split in {"train", "test"} + input = self.train_input if split == "train" else self.test_input + if nb_to_use > 0: + input = input[:nb_to_use] + if desc is None: + desc = f"epoch-{split}" + for batch in tqdm.tqdm( + input.split(self.batch_size), dynamic_ncols=True, desc=desc + ): + yield batch.to(self.device) + + def vocabulary_size(self): + return self.nb_codes + + def produce_results( + self, n_epoch, model, result_dir, logger, deterministic_synthesis + ): + k = torch.arange( + 2 * self.len_frame_seq + self.len_action_seq, device=self.device + )[None, :] + + input = self.test_input[:64].to(self.device) + result = input.clone() + + ar_mask = ( + (k >= self.len_frame_seq + self.len_action_seq).long().expand_as(result) + ) + result *= 1 - ar_mask + + masked_inplace_autoregression( + model, + self.batch_size, + result, + ar_mask, + deterministic_synthesis, + device=self.device, + ) + + seq_start = input[:, : self.len_frame_seq] + seq_end = input[:, self.len_frame_seq + self.len_action_seq :] + seq_predicted = result[:, self.len_frame_seq + self.len_action_seq :] + + result = torch.cat( + (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1 + ) + result = result.reshape(-1, result.size(-1)) + + frames = self.seq2frame(result) + image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png") + torchvision.utils.save_image( + frames.float() / (world.Box.nb_rgb_levels - 1), + image_name, + nrow=12, + padding=1, + pad_value=0.0, + ) + logger(f"wrote {image_name}") + + +######################################################################
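
The sketch below is not part of the patch above; it is a minimal, hedged illustration of how the Problem/SandBox machinery this change introduces could be exercised on its own, without any model. ProblemAddition.generate_sequences() returns token-id sequences together with an autoregression mask that is 1 exactly on the positions after the "=" token, i.e. the digits that SandBox later asks masked_inplace_autoregression to synthesize. The sample counts, batch size, digit count, and the use of print as a logger are arbitrary choices for illustration, not values taken from this repository.

import torch

import tasks  # the file modified by this patch

# Data side only: token sequences and the mask of positions to be predicted.
problem = tasks.ProblemAddition(nb_digits=3)
sequences, ar_mask = problem.generate_sequences(5)
for seq, mask in zip(sequences, ar_mask):
    print(problem.seq2str(seq))
    # carets mark the positions the model would have to synthesize
    print("".join("^" if m else " " for m in mask.tolist()))

# The same problem wrapped in the new SandBox task (CPU, small sample counts).
task = tasks.SandBox(
    problem,
    nb_train_samples=100,
    nb_test_samples=100,
    batch_size=25,
    logger=print,
    device=torch.device("cpu"),
)
print("vocabulary size:", task.vocabulary_size())
for batch in task.batches(split="train"):
    print("batch shape:", tuple(batch.size()))
    break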