)
parser.add_argument(
- "--task", type=str, default="picoclvr", help="picoclvr, mnist, maze, snake, stack"
+ "--task",
+ type=str,
+ default="picoclvr",
+ help="picoclvr, mnist, maze, snake, stack, expr",
)
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
-parser.add_argument("--result_dir", type=str, default="results_default")
+parser.add_argument("--result_dir", type=str, default=None)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--stack_fraction_values_for_train", type=float, default=None)
+##############################
+# Expr options
+
+parser.add_argument("--expr_nb_variables", type=int, default=5)
+
+parser.add_argument("--expr_sequence_length", type=int, default=30)
+
######################################################################
args = parser.parse_args()
assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}
-try:
- os.mkdir(args.result_dir)
-except FileExistsError:
- if not args.overwrite_results:
- print(f"result directory {args.result_dir} already exists")
- exit(1)
-
-log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
-
-if args.seed >= 0:
- # torch.backends.cudnn.deterministic = True
- # torch.backends.cudnn.benchmark = False
- # torch.use_deterministic_algorithms(True)
- torch.manual_seed(args.seed)
- if torch.cuda.is_available():
- torch.cuda.manual_seed_all(args.seed)
+if args.result_dir is None:
+ args.result_dir = f"results_{args.task}"
######################################################################
"nb_train_samples": 100000,
"nb_test_samples": 1000,
},
+ "expr": {
+ "nb_epochs": 50,
+ "batch_size": 25,
+ "nb_train_samples": 250000,
+ "nb_test_samples": 10000,
+ },
}
if args.task in default_args:
######################################################################
+try:
+ os.mkdir(args.result_dir)
+except FileExistsError:
+ if not args.overwrite_results:
+ print(f"result directory {args.result_dir} already exists")
+ exit(1)
+
+log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
+
+if args.seed >= 0:
+ # torch.backends.cudnn.deterministic = True
+ # torch.backends.cudnn.benchmark = False
+ # torch.use_deterministic_algorithms(True)
+ torch.manual_seed(args.seed)
+ if torch.cuda.is_available():
+ torch.cuda.manual_seed_all(args.seed)
+
+######################################################################
+
def log_string(s):
t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())
progress_bar_desc="autoregression",
device=torch.device("cpu"),
):
- # p = logits.softmax(1)
- # entropy[:,s]= p.xlogy(p).sum(1) / math.log(2)
batches = zip(input.split(batch_size), ar_mask.split(batch_size))
+
if progress_bar_desc is not None:
batches = tqdm.tqdm(
batches,
desc=progress_bar_desc,
total=input.size(0) // batch_size,
)
+
for input, ar_mask in batches:
i = (ar_mask.sum(0) > 0).nonzero()
if i.min() > 0:
model, "train", nb_to_use=1000
)
log_string(
- f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+ f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
)
test_nb_total, test_nb_correct, count = self.compute_error(
model, "test", nb_to_use=1000
)
log_string(
- f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+ f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
)
if count is not None:
)
log_string(
- f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+ f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
)
model.train(t)
self.device,
)
- mask = self.test_input.clone()
- stack.remove_popped_values(mask, self.nb_stacks, self.nb_digits)
- mask = mask != self.test_input
- counts = self.test_stack_counts.flatten()[mask.flatten()]
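+ # Select the positions whose token encodes a pop operation
+ # (odd token ids below 2 * self.nb_stacks)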
+ i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * self.nb_stacks)
+ counts = self.test_stack_counts.flatten()[i.flatten()]
counts = F.one_hot(counts).sum(0)
- log_string(f"stack_count {counts}")
+ log_string(f"test_pop_stack_counts {counts}")
self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
log_string(
- f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+ f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
)
- #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- input = self.test_input[:10, :50]
+ ##############################################################
+ # Log a few generated sequences
+ input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
result = input.clone()
stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
ar_mask = (result != input).long()
log_string(
f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
)
- #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ##############################################################
+
+ model.train(t)
+
+
+######################################################################
+
+
+import expr
+
+
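+# Task built on the expr module: the model is trained on randomly generated
+# expression sequences and evaluated by masking everything that follows the
+# first space token and letting it regenerate that part autoregressively.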
+class TaskExpr(Task):
+ def __init__(
+ self,
+ nb_train_samples,
+ nb_test_samples,
+ nb_variables,
+ sequence_length,
+ batch_size,
+ device=torch.device("cpu"),
+ ):
+ self.batch_size = batch_size
+ self.device = device
+
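+ # Generate the training and test sequences as plain character strings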
+ train_sequences = expr.generate_sequences(
+ nb_train_samples,
+ nb_variables=nb_variables,
+ length=sequence_length,
+ # length=2 * sequence_length,
+ # randomize_length=True,
+ )
+ test_sequences = expr.generate_sequences(
+ nb_test_samples,
+ nb_variables=nb_variables,
+ length=sequence_length,
+ )
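+ # Build the character/id vocabulary over both splits; "#" is the
+ # filler/padding character (ids depend on the set iteration order)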
+ self.char2id = dict(
+ [
+ (c, n)
+ for n, c in enumerate(
+ set("#" + "".join(train_sequences + test_sequences))
+ )
+ ]
+ )
+ self.id2char = dict([(n, c) for c, n in self.char2id.items()])
+
+ self.filler, self.space = self.char2id["#"], self.char2id[" "]
+
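+ # Encode the sequences as tensors of token ids, padding each one with
+ # the filler character up to the longest sequence of its split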
+ len_max = max([len(x) for x in train_sequences])
+ self.train_input = torch.cat(
+ [
+ torch.tensor(
+ [
+ [self.char2id[c] for c in s + "#" * (len_max - len(s))]
+ for s in train_sequences
+ ]
+ )
+ ],
+ 0,
+ ).to(device)
+
+ len_max = max([len(x) for x in test_sequences])
+ self.test_input = torch.cat(
+ [
+ torch.tensor(
+ [
+ [self.char2id[c] for c in s + "#" * (len_max - len(s))]
+ for s in test_sequences
+ ]
+ )
+ ],
+ 0,
+ ).to(device)
+
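+ # Vocabulary size, taken as the largest token id actually used plus one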
+ self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+ def batches(self, split="train", nb_to_use=-1, desc=None):
+ assert split in {"train", "test"}
+ input = self.train_input if split == "train" else self.test_input
+ if nb_to_use > 0:
+ input = input[:nb_to_use]
+ if desc is None:
+ desc = f"epoch-{split}"
+ for batch in tqdm.tqdm(
+ input.split(self.batch_size), dynamic_ncols=True, desc=desc
+ ):
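+ # For training batches, trim the trailing columns that contain only
+ # filler tokens in every sequence of the batch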
+ if split == "train":
+ last = (batch != self.filler).max(0).values.nonzero().max() + 1
+ batch = batch[:, :last]
+ yield batch
+
+ def vocabulary_size(self):
+ return self.nb_codes
+
+ def seq2str(self, s):
+ return "".join([self.id2char[k.item()] for k in s])
+
+ def produce_results(self, n_epoch, model):
+ with torch.autograd.no_grad():
+ t = model.training
+ model.eval()
+
+ def compute_nb_correct(input):
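+ # Mask everything from the first space token onward, overwrite it
+ # with the filler, and let the model regenerate it autoregressively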
+ result = input.clone()
+ ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
+ result = (1 - ar_mask) * result + ar_mask * self.filler
+ masked_inplace_autoregression(
+ model, self.batch_size, result, ar_mask, device=self.device
+ )
+
+ nb_total = input.size(0)
+ nb_correct = (input == result).long().min(1).values.sum()
+
+ values_input = expr.extract_results([self.seq2str(s) for s in input])
+ max_input = max([max(x.values()) for x in values_input])
+ values_result = expr.extract_results([self.seq2str(s) for s in result])
+ max_result = max(
+ [-1 if len(x) == 0 else max(x.values()) for x in values_result]
+ )
+
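+ # Per ground-truth value, count the cases where the model produced
+ # no value (nb_missing) and the joint (true, predicted) value counts
+ # (nb_predicted); these statistics are computed but not logged yet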
+ nb_missing, nb_predicted = torch.zeros(max_input + 1), torch.zeros(
+ max_input + 1, max_result + 1
+ )
+ for i, r in zip(values_input, values_result):
+ for n, vi in i.items():
+ vr = r.get(n)
+ if vr is None or vr < 0:
+ nb_missing[vi] += 1
+ else:
+ nb_predicted[vi, vr] += 1
+
+ return nb_total, nb_correct
+
+ test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
+
+ log_string(
+ f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+ )
+
+ ##############################################################
+ # Log a few generated sequences
+ input = self.test_input[:10]
+ result = input.clone()
+ ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
+ result = (1 - ar_mask) * result + ar_mask * self.filler
+ for n in range(result.size(0)):
+ log_string(f"test_before {self.seq2str(result[n])}")
+ masked_inplace_autoregression(
+ model, self.batch_size, result, ar_mask, device=self.device
+ )
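+ # Reference sequence: ground truth on the regenerated positions, the
+ # space token everywhere else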
+ correct = (1 - ar_mask) * self.space + ar_mask * input
+ for n in range(result.size(0)):
+ comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
+ log_string(f"test_after {self.seq2str(result[n])} {comment}")
+ log_string(f"correct {self.seq2str(correct[n])}")
+ ##############################################################
model.train(t)
device=device,
)
+elif args.task == "expr":
+ task = TaskExpr(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ nb_variables=args.expr_nb_variables,
+ sequence_length=args.expr_sequence_length,
+ batch_size=args.batch_size,
+ device=device,
+ )
+
else:
raise ValueError(f"Unknown task {args.task}")