Add a "world" task (default arguments and tasks.World wiring); rename output_ar to output in the training loop.
diff --git a/main.py b/main.py
index 37515b5..5234d6f 100755
--- a/main.py
+++ b/main.py
@@ -219,6 +219,12 @@ default_task_args = {
         "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
+    "world": {
+        "model": "37M",
+        "batch_size": 25,
+        "nb_train_samples": 50000,
+        "nb_test_samples": 10000,
+    },
     "byheart": {
         "model": "37M",
         "batch_size": 25,
@@ -463,6 +469,16 @@ elif args.task == "byheart":
     )
     args.max_percents_of_test_in_train = -1
 
+elif args.task == "world":
+    task = tasks.World(
+        nb_train_samples=args.nb_train_samples,
+        nb_test_samples=args.nb_test_samples,
+        batch_size=args.physical_batch_size,
+        logger=log_string,
+        device=device,
+    )
+    args.max_percents_of_test_in_train = -1
+
 elif args.task == "learnop":
     task = tasks.SandBox(
         problem=problems.ProblemLearnOperator(),
@@ -844,7 +860,7 @@ for n_epoch in range(nb_epochs_finished, args.nb_epochs):
             input = input.to(device)
 
             bs = model(mygpt.BracketedSequence(input))
-            output_ar = bs.x
+            output = bs.x
 
             loss = F.cross_entropy(output.transpose(1, 2), input)
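
For context, the new "world" branch above implies the constructor interface that tasks.World must expose. Below is a minimal sketch of that interface: the class name and keyword arguments are taken from the call site in the diff, while the body, the placeholder data, and the batches() helper are assumptions about how a task is consumed, not the actual code in tasks.py.

    import torch

    class World:
        # Sketch only: the keyword arguments mirror the call site in main.py;
        # everything inside the body is a hypothetical stand-in.
        def __init__(
            self,
            nb_train_samples,
            nb_test_samples,
            batch_size,
            logger=None,
            device=torch.device("cpu"),
        ):
            self.batch_size = batch_size
            self.device = device
            self.logger = logger if logger is not None else print
            # Placeholder token sequences; the real task would generate
            # "world" samples here.
            self.train_input = torch.zeros(nb_train_samples, 2, dtype=torch.long)
            self.test_input = torch.zeros(nb_test_samples, 2, dtype=torch.long)

        def batches(self, split="train"):
            # Yields mini-batches; the training loop in the last hunk moves
            # each batch to the device itself (input = input.to(device)).
            input = self.train_input if split == "train" else self.test_input
            for batch in input.split(self.batch_size):
                yield batch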
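The last hunk renames output_ar to output so that the variable assigned from bs.x matches the one used in the loss. As a general PyTorch point (not specific to this repository), F.cross_entropy expects the class dimension second, which is why (batch, seq, vocab) logits are transposed to (batch, vocab, seq) before being scored against (batch, seq) targets. A self-contained illustration with made-up shapes:

    import torch
    import torch.nn.functional as F

    B, T, V = 25, 10, 128           # batch, sequence length, vocabulary size (illustrative)
    logits = torch.randn(B, T, V)   # per-position logits, as a model emits them
    targets = torch.randint(V, (B, T))

    # F.cross_entropy wants (N, C, ...) logits against (N, ...) targets,
    # so the vocabulary axis must move to dim 1.
    loss = F.cross_entropy(logits.transpose(1, 2), targets)
    print(loss.item())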