progress_bar_desc="autoregression",
device=torch.device("cpu"),
):
+ assert input.size() == ar_mask.size()
+
batches = zip(input.split(batch_size), ar_mask.split(batch_size))
if progress_bar_desc is not None:
batches = tqdm.tqdm(
batches,
dynamic_ncols=True,
desc=progress_bar_desc,
- total=input.size(0) // batch_size,
+ # total=input.size(0) // batch_size,
)
with torch.autograd.no_grad():
pass
+######################################################################
+
+
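+# A Problem supplies the training material: generate_sequences(nb) must
+# return a pair (sequences, ar_mask), with ar_mask set to 1 at the positions
+# the model has to generate and 0 at the positions given as prompt.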
+class Problem:
+ def generate_sequences(self, nb):
+ pass
+
+ def log_performance(self, sequences, logger):
+ pass
+
+
+class ProblemByheart(Problem):
+ def __init__(self):
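+ # a fixed catalog of random sequences [prompt | separator token 10 | result]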
+ nb_seq, len_prompt, len_result = 100, 5, 5
+ self.seq = torch.randint(10, (nb_seq, len_prompt + 1 + len_result))
+ self.seq[:, len_prompt] = 10
+
+ def generate_sequences(self, nb):
+ sequences = self.seq[torch.randint(self.seq.size(0), (nb,))]
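+ # the mask is 1 strictly after the first separator token (10), i.e. on
+ # the result part that the model has to generate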
+ ar_mask = (sequences == 10).long()
+ ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
+ return sequences, ar_mask
+
+ # problems = [ProblemByheart()]
+ # nb_common_codes = 100
+
+ # def generate_sequences(nb_samples):
+ # problem_indexes = torch.randint(len(problems), (nb_samples,))
+ # nb_samples_per_problem = torch.nn.functional.one_hot(problem_indexes).sum(0)
+ # print(f"{nb_samples_per_problem}")
+ # all_seq = []
+ # for nb, p in zip(nb_samples_per_problem, problems):
+ # all_seq.append(p.generate_sequences(nb))
+ # return all_seq
+
+ # for strain, stest in zip(train_seq, test_seq):
+ # s = torch.cat((strain, stest), 0)
+
+
+class SandBox(Task):
+ def __init__(
+ self,
+ problem,
+ nb_train_samples,
+ nb_test_samples,
+ batch_size,
+ logger=None,
+ device=torch.device("cpu"),
+ max_nb_codes=1024,
+ ):
+ super().__init__()
+
+ self.batch_size = batch_size
+ self.device = device
+
+ self.train_input, self.train_ar_mask = problem.generate_sequences(
+ nb_train_samples
+ )
+ self.test_input, self.test_ar_mask = problem.generate_sequences(nb_test_samples)
+
+ self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+ # A bit of paranoia never hurts
+ assert (
+ self.nb_codes <= max_nb_codes
+ and self.train_input.min() >= 0
+ and self.test_input.min() >= 0
+ and tuple(self.train_ar_mask.unique()) == (0, 1)
+ and tuple(self.test_ar_mask.unique()) == (0, 1)
+ )
+
+ def batches(self, split="train", nb_to_use=-1, desc=None):
+ assert split in {"train", "test"}
+ input = self.train_input if split == "train" else self.test_input
+ if nb_to_use > 0:
+ input = input[:nb_to_use]
+ if desc is None:
+ desc = f"epoch-{split}"
+ for batch in tqdm.tqdm(
+ input.split(self.batch_size), dynamic_ncols=True, desc=desc
+ ):
+ yield batch.to(self.device)
+
+ def vocabulary_size(self):
+ return self.nb_codes
+
+ def produce_results(
+ self, n_epoch, model, result_dir, logger, deterministic_synthesis
+ ):
+ def compute_accuracy(input, ar_mask):
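+ # erase the tokens to be generated; the model will fill them back in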
+ result = input.clone() * (1 - ar_mask)
+ masked_inplace_autoregression(
+ model,
+ self.batch_size,
+ result,
+ ar_mask,
+ deterministic_synthesis,
+ progress_bar_desc=None,
+ device=self.device,
+ )
+
+ nb_total = ar_mask.sum().item()
+ nb_correct = ((result == input).long() * ar_mask).sum().item()
+
+ return nb_total, nb_correct
+
+ train_nb_total, train_nb_correct = compute_accuracy(
+ self.train_input, self.train_ar_mask
+ )
+
+ logger(
+ f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+ )
+
+ test_nb_total, test_nb_correct = compute_accuracy(
+ self.test_input, self.test_ar_mask
+ )
+
+ logger(
+ f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+ )
+
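+# A minimal usage sketch, with hypothetical helper names, of how a SandBox
+# task would be driven (the nb values are illustrative):
+#
+#   task = SandBox(ProblemByheart(), nb_train_samples=10000,
+#                  nb_test_samples=1000, batch_size=25)
+#   for input in task.batches(split="train"):
+#       train_one_step(model, input)  # hypothetical training-loop helper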
+
######################################################################
import picoclvr
pruner_train=None,
pruner_eval=None,
):
+ super().__init__()
+
def generate_descr(nb, cache_suffix, pruner):
return picoclvr.generate(
nb,
def __init__(
self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
):
+ super().__init__()
+
self.nb_train_samples = nb_train_samples
self.nb_test_samples = nb_test_samples
self.batch_size = batch_size
nb_walls,
device=torch.device("cpu"),
):
+ super().__init__()
+
self.batch_size = batch_size
self.height = height
self.width = width
prompt_length,
device=torch.device("cpu"),
):
+ super().__init__()
+
self.batch_size = batch_size
self.height = height
self.width = width
)
result *= 1 - ar_mask
- # snake.solver(result,ar_mask)
-
masked_inplace_autoregression(
model,
self.batch_size,
nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()
- # nb_total = result.size(0)
- # nb_correct = ((result - input).abs().sum(1) == 0).sum()
-
return nb_total, nb_correct
- # train_nb_total, train_nb_correct = compute_nb_correct(
- # self.train_input, self.train_prior_visits
- # )
-
- # logger(
- # f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
- # )
-
test_nb_total, test_nb_correct = compute_nb_correct(
self.test_input[:1000], self.test_prior_visits[:1000]
)
fraction_values_for_train=None,
device=torch.device("cpu"),
):
+ super().__init__()
+
self.batch_size = batch_size
self.nb_steps = nb_steps
self.nb_stacks = nb_stacks
batch_size,
device=torch.device("cpu"),
):
+ super().__init__()
+
self.batch_size = batch_size
self.device = device
values_input = expr.extract_results([self.seq2str(s) for s in input])
values_result = expr.extract_results([self.seq2str(s) for s in result])
- for i, r in zip(values_input, values_result):
- for n, vi in i.items():
- vr = r.get(n)
- if vr is None or vr < 0:
- nb_missed += 1
- else:
- d = abs(vr - vi)
- if d >= nb_delta.size(0):
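+ # dump (true value, prediction) pairs to a per-epoch file for inspection;
+ # -1 marks a variable missing from the model's output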
+ filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")
+
+ with open(filename, "w") as f:
+ for i, r in zip(values_input, values_result):
+ for n, vi in i.items():
+ vr = r.get(n)
+ f.write(f"{vi} {-1 if vr is None else vr}\n")
+
+ if vr is None or vr < 0:
nb_missed += 1
else:
- nb_delta[d] += 1
+ d = abs(vr - vi)
+ if d >= nb_delta.size(0):
+ nb_missed += 1
+ else:
+ nb_delta[d] += 1
######################################################################
######################################################################
+
+import world
+
+
+class World(Task):
+ def __init__(
+ self,
+ nb_train_samples,
+ nb_test_samples,
+ batch_size,
+ vqae_nb_epochs,
+ logger=None,
+ device=torch.device("cpu"),
+ device_storage=torch.device("cpu"),
+ ):
+ super().__init__()
+
+ self.batch_size = batch_size
+ self.device = device
+
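+ # frame2seq / seq2frame convert frames to/from discrete token sequences;
+ # judging by the parameter name, vqae_nb_epochs trains the VQ auto-encoder
+ # behind these processors inside create_data_and_processors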
+ (
+ train_frames,
+ train_action_seq,
+ test_frames,
+ test_action_seq,
+ self.frame2seq,
+ self.seq2frame,
+ ) = world.create_data_and_processors(
+ nb_train_samples,
+ nb_test_samples,
+ mode="first_last",
+ nb_steps=30,
+ nb_epochs=vqae_nb_epochs,
+ logger=logger,
+ device=device,
+ device_storage=device_storage,
+ )
+
+ train_frame_seq = self.frame2seq(train_frames).to(device_storage)
+ test_frame_seq = self.frame2seq(test_frames).to(device_storage)
+
+ nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1
+ nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1
+
+ self.len_frame_seq = train_frame_seq.size(1)
+ self.len_action_seq = train_action_seq.size(1)
+ self.nb_codes = nb_frame_codes + nb_action_codes
+
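+ # pair consecutive frames as (start, end), shift action codes past the
+ # frame codes so the two vocabularies do not collide, and build sequences
+ # [start frame | actions | end frame]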
+ train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
+ train_action_seq += nb_frame_codes
+ self.train_input = torch.cat(
+ (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
+ )
+
+ test_frame_seq = test_frame_seq.reshape(test_frame_seq.size(0) // 2, 2, -1)
+ test_action_seq += nb_frame_codes
+ self.test_input = torch.cat(
+ (test_frame_seq[:, 0, :], test_action_seq, test_frame_seq[:, 1, :]), 1
+ )
+
+ def batches(self, split="train", nb_to_use=-1, desc=None):
+ assert split in {"train", "test"}
+ input = self.train_input if split == "train" else self.test_input
+ if nb_to_use > 0:
+ input = input[:nb_to_use]
+ if desc is None:
+ desc = f"epoch-{split}"
+ for batch in tqdm.tqdm(
+ input.split(self.batch_size), dynamic_ncols=True, desc=desc
+ ):
+ yield batch.to(self.device)
+
+ def vocabulary_size(self):
+ return self.nb_codes
+
+ def produce_results(
+ self, n_epoch, model, result_dir, logger, deterministic_synthesis
+ ):
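+ # position index along a sequence [start frame | actions | end frame]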
+ k = torch.arange(
+ 2 * self.len_frame_seq + self.len_action_seq, device=self.device
+ )[None, :]
+
+ input = self.test_input[:64].to(self.device)
+ result = input.clone()
+
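+ # regenerate only the end frame: the mask is 1 on the last
+ # len_frame_seq tokens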
+ ar_mask = (
+ (k >= self.len_frame_seq + self.len_action_seq).long().expand_as(result)
+ )
+ result *= 1 - ar_mask
+
+ masked_inplace_autoregression(
+ model,
+ self.batch_size,
+ result,
+ ar_mask,
+ deterministic_synthesis,
+ device=self.device,
+ )
+
+ seq_start = input[:, : self.len_frame_seq]
+ seq_end = input[:, self.len_frame_seq + self.len_action_seq :]
+ seq_predicted = result[:, self.len_frame_seq + self.len_action_seq :]
+
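+ # for each sample, stack the true start frame, the true end frame, and
+ # the predicted end frame so they appear as consecutive images in the grid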
+ result = torch.cat(
+ (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1
+ )
+ result = result.reshape(-1, result.size(-1))
+
+ frames = self.seq2frame(result)
+ image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
+ torchvision.utils.save_image(
+ frames.float() / (world.Box.nb_rgb_levels - 1),
+ image_name,
+ nrow=12,
+ padding=1,
+ pad_value=0.0,
+ )
+ logger(f"wrote {image_name}")
+
+
+######################################################################