-        result = input.clone()
-        ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
-        result = (1 - ar_mask) * result + ar_mask * self.filler
-        for n in range(result.size(0)):
-            logger(f"test_before {self.seq2str(result[n])}")
-        masked_inplace_autoregression(
-            model,
-            self.batch_size,
-            result,
-            ar_mask,
-            deterministic_synthesis,
-            device=self.device,
-        )
-        correct = (1 - ar_mask) * self.space + ar_mask * input
-        for n in range(result.size(0)):
-            comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
-            logger(f"test_after {self.seq2str(result[n])} {comment}")
-            logger(f"correct {self.seq2str(correct[n])}")
-        ##############################################################
-
-        model.train(t)
+        else:
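+            # read the prompts to complete from the given file, padding each
+            # with trailing '#' characters to leave room for the completion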
+            with open(input_file, "r") as f:
+                sequences = [e.strip() for e in f.readlines()]
+                sequences = [s + " " + "#" * 50 for s in sequences]
+                input = self.tensorize(sequences)
+
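+        # ar_mask flags every position after the first space; those positions
+        # are overwritten with the filler token and left for the model to fill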
+        result = input.clone()
+        s = (result == self.space).long()
+        ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
+        result = (1 - ar_mask) * result + ar_mask * self.filler
+
+        for n in range(result.size(0)):
+            logger(f"test_before {self.seq2str(result[n])}")
+
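+        # let the model regenerate the masked positions in place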
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
+
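+        # reference sequences: the expected completion, with the prompt part
+        # blanked to spaces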
+        correct = (1 - ar_mask) * self.space + ar_mask * input
+        for n in range(result.size(0)):
+            comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
+            logger(f"test_after {self.seq2str(result[n])} {comment}")
+            logger(f"truth {self.seq2str(correct[n])}")
+        ##############################################################
+
+
+######################################################################
+
+import world
+
+
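+# Completion task on synthetic "world" episodes: each sample holds the first
+# and last frame of an episode, encoded as discrete codes, and the model is
+# asked to generate the last frame given the first.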
+class World(Task):
+    def __init__(
+        self,
+        nb_train_samples,
+        nb_test_samples,
+        batch_size,
+        vqae_nb_epochs,
+        device=torch.device("cpu"),
+    ):
+        self.batch_size = batch_size
+        self.device = device
+
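+        # generate the train/test episodes and the frame <-> code-sequence
+        # processors (trained for vqae_nb_epochs epochs)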
+        (
+            train_frames,
+            self.train_actions,
+            test_frames,
+            self.test_actions,
+            self.frame2seq,
+            self.seq2frame,
+        ) = world.create_data_and_processors(
+            nb_train_samples,
+            nb_test_samples,
+            mode="first_last",
+            nb_steps=30,
+            nb_epochs=vqae_nb_epochs,
+            device=device,
+        )
+
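+        # encode the frames to codes and pack the two frames of each sample
+        # (first and last) into a single flat sequence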
+        self.train_input = self.frame2seq(train_frames)
+        self.train_input = self.train_input.reshape(self.train_input.size(0) // 2, -1)
+        self.test_input = self.frame2seq(test_frames)
+        self.test_input = self.test_input.reshape(self.test_input.size(0) // 2, -1)
+
+        # plain int so vocabulary_size() does not return a tensor
+        self.nb_codes = int(max(self.train_input.max(), self.test_input.max())) + 1
+
+    def batches(self, split="train", nb_to_use=-1, desc=None):
+        assert split in {"train", "test"}
+        input = self.train_input if split == "train" else self.test_input
+        if nb_to_use > 0:
+            input = input[:nb_to_use]
+        if desc is None:
+            desc = f"epoch-{split}"
+        for batch in tqdm.tqdm(
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
+        ):
+            yield batch
+
+    def vocabulary_size(self):
+        return self.nb_codes
+
+    def produce_results(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis
+    ):
+        result = self.test_input[:64].clone()
+        l = result.size(1)
+        k = torch.arange(l, device=self.device)[None, :]
+
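+        # keep the codes of the first frame and mask the second half of each
+        # sequence, i.e. the final frame to be generated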
+        ar_mask = (k >= l // 2).long().expand_as(result)
+        result *= 1 - ar_mask
+
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
+
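+        # split each completed sequence back into its two frames for decoding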
+        result = result.reshape(result.size(0) * 2, -1)
+
+        frames = self.seq2frame(result)
+        image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
+        torchvision.utils.save_image(
+            frames.float() / (world.Box.nb_rgb_levels - 1),
+            image_name,
+            nrow=8,
+            padding=1,
+            pad_value=0.0,
+        )
+        logger(f"wrote {image_name}")