pass
+######################################################################
+
+
+class Problem:
+    # interface: generate_sequences produces a (nb, seq_len) tensor of
+    # token codes; perf evaluates a model's output sequences via logger
+    def generate_sequences(self, nb):
+        pass
+
+    def perf(self, seq, logger):
+        pass
+
+
+class ProblemByheart(Problem):
+ def __init__(self):
+        nb_seq, len_prompt, len_result = 100, 5, 5
+        # fixed pool of sequences to memorize: len_prompt prompt tokens,
+        # a -1 separator, then len_result result tokens, over 10 symbols
+        self.seq = torch.randint(10, (nb_seq, len_prompt + 1 + len_result))
+        self.seq[:, len_prompt] = -1
+
+ def generate_sequences(self, nb):
+ return self.seq[torch.randint(self.seq.size(0), (nb,))]
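+    # usage sketch (hypothetical, for illustration only): each generated
+    # row is len_prompt prompt tokens, the -1 separator, then len_result
+    # result tokens, e.g.
+    #   ProblemByheart().generate_sequences(4).size() -> torch.Size([4, 11])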
+
+class SandBox(Task):
+ def __init__(
+ self,
+ nb_train_samples,
+ nb_test_samples,
+ batch_size,
+ logger=None,
+ device=torch.device("cpu"),
+ ):
+ super().__init__()
+
+        self.batch_size = batch_size
+        self.device = device
+
+        problems = [ProblemByheart()]
+ nb_common_codes = 100
+
+        def generate_sequences(nb_samples):
+            problem_indexes = torch.randint(len(problems), (nb_samples,))
+            # count how many samples fall to each problem; num_classes is
+            # required so problems that receive no sample still get a slot
+            nb_samples_per_problem = torch.nn.functional.one_hot(
+                problem_indexes, num_classes=len(problems)
+            ).sum(0)
+            print(f"{nb_samples_per_problem=}")
+            all_seq = []
+            for nb, p in zip(nb_samples_per_problem, problems):
+                all_seq.append(p.generate_sequences(nb.item()))
+            return all_seq
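+        # e.g. with two problems and problem_indexes == tensor([0, 1, 0]),
+        # nb_samples_per_problem == tensor([2, 1]): two samples drawn from
+        # the first problem and one from the second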
+
+ train_seq = generate_sequences(nb_train_samples)
+ test_seq = generate_sequences(nb_test_samples)
+
+        # each problem contributes one tensor of sequences; concatenate
+        # them into the train / test inputs consumed by batches() below
+        self.train_input = torch.cat(train_seq, 0)
+        self.test_input = torch.cat(test_seq, 0)
+
+        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+ def batches(self, split="train", nb_to_use=-1, desc=None):
+ assert split in {"train", "test"}
+ input = self.train_input if split == "train" else self.test_input
+ if nb_to_use > 0:
+ input = input[:nb_to_use]
+ if desc is None:
+ desc = f"epoch-{split}"
+ for batch in tqdm.tqdm(
+ input.split(self.batch_size), dynamic_ncols=True, desc=desc
+ ):
+            yield batch.to(self.device)
+
+ def vocabulary_size(self):
+ return self.nb_codes
+
+ def produce_results(
+ self, n_epoch, model, result_dir, logger, deterministic_synthesis
+ ):
+ # logger(
+ # f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+ # )
+ pass
+
+
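+# A minimal driver sketch (hypothetical, assuming the Task interface used
+# throughout this file):
+#
+#   task = SandBox(nb_train_samples=10000, nb_test_samples=1000, batch_size=25)
+#   task.vocabulary_size()        # number of distinct token codes
+#   for input in task.batches("train"):
+#       ...                       # (batch_size, seq_len) batches on task.device
+#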
######################################################################
import picoclvr
pruner_train=None,
pruner_eval=None,
):
+ super().__init__()
+
def generate_descr(nb, cache_suffix, pruner):
return picoclvr.generate(
nb,
def __init__(
self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
):
+ super().__init__()
+
        self.nb_train_samples = nb_train_samples
        self.nb_test_samples = nb_test_samples
self.batch_size = batch_size
nb_walls,
device=torch.device("cpu"),
):
+ super().__init__()
+
self.batch_size = batch_size
self.height = height
self.width = width
prompt_length,
device=torch.device("cpu"),
):
+ super().__init__()
+
self.batch_size = batch_size
self.height = height
self.width = width
fraction_values_for_train=None,
device=torch.device("cpu"),
):
+ super().__init__()
+
self.batch_size = batch_size
self.nb_steps = nb_steps
self.nb_stacks = nb_stacks
batch_size,
device=torch.device("cpu"),
):
+ super().__init__()
+
self.batch_size = batch_size
self.device = device
vqae_nb_epochs,
logger=None,
device=torch.device("cpu"),
+ device_storage=torch.device("cpu"),
):
+ super().__init__()
+
self.batch_size = batch_size
self.device = device
nb_epochs=vqae_nb_epochs,
logger=logger,
device=device,
+ device_storage=device_storage,
)
print(f"{train_action_seq.size()=}")
- train_frame_seq = self.frame2seq(train_frames)
- test_frame_seq = self.frame2seq(test_frames)
+ train_frame_seq = self.frame2seq(train_frames).to(device_storage)
+ test_frame_seq = self.frame2seq(test_frames).to(device_storage)
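+        # sequences can be large: keep them on device_storage (the CPU by
+        # default) and move only the current batch to self.device when
+        # iterating, so accelerator memory stays bounded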
nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1
nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1
self.nb_codes = nb_frame_codes + nb_action_codes
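+        # frames and actions share one vocabulary: frame codes occupy
+        # [0, nb_frame_codes) and action codes, shifted by nb_frame_codes
+        # below, occupy [nb_frame_codes, nb_codes)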
train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
+ print(f"{train_action_seq.device=} {nb_frame_codes.device=}")
train_action_seq += nb_frame_codes
self.train_input = torch.cat(
(train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
for batch in tqdm.tqdm(
input.split(self.batch_size), dynamic_ncols=True, desc=desc
):
- yield batch
+ yield batch.to(self.device)
def vocabulary_size(self):
return self.nb_codes
2 * self.len_frame_seq + self.len_action_seq, device=self.device
)[None, :]
- input = self.test_input[:64]
+ input = self.test_input[:64].to(self.device)
result = input.clone()
ar_mask = (