# Written by Francois Fleuret <francois@fleuret.org>
-# torch.backends.cuda.matmul.allow_tf32
-# torch.autocast(torch.bfloat16)
-
import math, sys, argparse, time, tqdm, os
import torch, torchvision
from torch.nn import functional as F
import ffutils
-import mygpt, tasks
+import mygpt, tasks, problems
######################################################################
parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
##############################
-# picoclvr options
+# rpl options
+
+parser.add_argument("--rpl_nb_starting_values", type=int, default=5)
+
+parser.add_argument("--rpl_max_input", type=int, default=9)
+
+parser.add_argument("--rpl_prog_len", type=int, default=10)
+
+parser.add_argument("--rpl_nb_runs", type=int, default=8)
+
+parser.add_argument("--rpl_no_prog", action="store_true", default=False)
+
+##############################
+# sandbox options
parser.add_argument("--sandbox_level", type=int, default=0)
"rpl": {
"nb_epochs": 40,
"batch_size": 25,
- "nb_train_samples": 1000000,
+ "nb_train_samples": 100000,
"nb_test_samples": 10000,
},
"world": {
if args.task == "sandbox":
if args.sandbox_level == 0:
- problem = tasks.ProblemLevel0(
+ problem = problems.ProblemLevel0(
nb_sentences=args.sandbox_levels_nb_items,
len_prompt=args.sandbox_levels_len_source,
len_result=args.sandbox_levels_len_result,
)
elif args.sandbox_level == 1:
- problem = tasks.ProblemLevel1(
+ problem = problems.ProblemLevel1(
nb_operators=args.sandbox_levels_nb_items,
len_source=args.sandbox_levels_len_source,
len_result=args.sandbox_levels_len_result,
)
elif args.sandbox_level == 2:
- problem = tasks.ProblemLevel2(
+ problem = problems.ProblemLevel2(
len_source=args.sandbox_levels_len_source,
len_result=args.sandbox_levels_len_result,
)
task = tasks.SandBox(
problem,
- # tasks.ProblemAddition(zero_padded=False, inverted_result=False),
+ # problems.ProblemAddition(zero_padded=False, inverted_result=False),
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
batch_size=args.batch_size,
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
batch_size=args.batch_size,
+ nb_starting_values=args.rpl_nb_starting_values,
+ max_input=args.rpl_max_input,
+ prog_len=args.rpl_prog_len,
+ nb_runs=args.rpl_nb_runs,
+ no_prog=args.rpl_no_prog,
+ logger=log_string,
device=device,
)
if args.task == "expr" and args.expr_input_file is not None:
task.produce_results(
- nb_epochs_finished,
- model,
- args.result_dir,
- log_string,
- args.deterministic_synthesis,
- args.expr_input_file,
+ n_epoch=nb_epochs_finished,
+ model=model,
+ result_dir=args.result_dir,
+ logger=log_string,
+ deterministic_synthesis=args.deterministic_synthesis,
+ input_file=args.expr_input_file,
)
exit(0)
if nb_epochs_finished >= nb_epochs:
task.produce_results(
- nb_epochs_finished,
- model,
- args.result_dir,
- log_string,
- args.deterministic_synthesis,
+ n_epoch=nb_epochs_finished,
+ model=model,
+ result_dir=args.result_dir,
+ logger=log_string,
+ deterministic_synthesis=args.deterministic_synthesis,
)
for n_epoch in range(nb_epochs_finished, nb_epochs):
)
task.produce_results(
- n_epoch, model, args.result_dir, log_string, args.deterministic_synthesis
+ n_epoch=n_epoch,
+ model=model,
+ result_dir=args.result_dir,
+ logger=log_string,
+ deterministic_synthesis=args.deterministic_synthesis,
)
checkpoint = {