-elif args.task == "expr":
-    task = tasks.Expr(
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        nb_variables=args.expr_nb_variables,
-        sequence_length=args.expr_sequence_length,
-        operand_max=args.expr_operand_max,
-        result_max=args.expr_result_max,
-        batch_size=args.batch_size,
-        device=device,
-    )
+    nb_test_samples, acc_test_loss = 0, 0.0
+
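+    # Accumulate the summed cross-entropy over the whole test split. This
+    # assumes the surrounding (unshown) context disables gradient tracking,
+    # e.g. with torch.no_grad(), and has put the model in eval mode.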
+    for input in quiz_machine.batches(model, split="test"):
+        input = input.to(local_device)
+
+        output = model(mygpt.BracketedSequence(input)).x
+
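+        # F.cross_entropy expects logits shaped (batch, classes, ...), hence
+        # the transpose from (batch, seq, vocab) to (batch, vocab, seq).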
+        loss = F.cross_entropy(output.transpose(1, 2), input)
+
+        acc_test_loss += loss.item() * input.size(0)
+
+        nb_test_samples += input.size(0)
+
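+    # Clamp the exponent at 100 so a diverging loss cannot overflow exp().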
+    test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
+
+    log_string(f"test_perplexity {n_epoch} model {model.id} {test_perplexity}")
+
+    model.main_test_accuracy = quiz_machine.produce_results(
+        n_epoch=n_epoch,
+        model=model,
+        result_dir=args.result_dir,
+        deterministic_synthesis=deterministic_synthesis,
+    )
+
+
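+# One training epoch: gradient-accumulated Adam updates over the train
+# split, then an evaluation pass via run_tests.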
+def one_epoch(model, quiz_machine, local_device=main_device):
+    model.to(local_device).train()
+
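+    # Note: a fresh Adam instance is created every epoch, so its moment
+    # estimates do not carry over between epochs.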
+    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
+
+    nb_train_samples, acc_train_loss = 0, 0.0
+
+    for input in quiz_machine.batches(model, split="train"):
+        input = input.to(local_device)
+
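+        # Gradient accumulation: gradients are zeroed at the start of each
+        # logical batch of args.batch_size samples, which presumably spans
+        # several smaller physical batches from quiz_machine.batches().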
+        if nb_train_samples % args.batch_size == 0:
+            optimizer.zero_grad()
+
+        output = model(mygpt.BracketedSequence(input)).x
+        loss = F.cross_entropy(output.transpose(1, 2), input)
+        acc_train_loss += loss.item() * input.size(0)
+
+        nb_train_samples += input.size(0)
+
+        loss.backward()
+
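+        # Update the weights once a full logical batch has been accumulated.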
+        if nb_train_samples % args.batch_size == 0:
+            optimizer.step()
+
+    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
+
+    log_string(f"train_perplexity {n_epoch} model {model.id} {train_perplexity}")
+
+    run_tests(model, quiz_machine, deterministic_synthesis=False)