parser.add_argument("--overwrite_results", action="store_true", default=False)
+parser.add_argument("--one_shot", action="store_true", default=False)
+
parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
##############################
def masked_inplace_autoregression(model, batch_size, input, ar_mask):
-
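+    # Regenerate in place the positions of `input` where ar_mask is
+    # nonzero, processing the samples in chunks of batch_size.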
for input, ar_mask in zip(input.split(batch_size), ar_mask.split(batch_size)):
i = (ar_mask.sum(0) > 0).nonzero()
if i.min() > 0:
######################################################################
+def compute_perplexity(model, split="train"):
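+    # Mean per-token cross-entropy over the split, exponentiated into a
+    # perplexity. Uses the script-level globals `task` and `device`, and
+    # restores the model's train/eval mode before returning.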
+ with torch.autograd.no_grad():
+ t = model.training
+ model.eval()
+
+ nb_samples, acc_loss = 0, 0.0
+
+ for input in task.batches(split=split):
+ input = input.to(device)
+
+ output = model(mygpt.BracketedSequence(input)).x
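+        # F.cross_entropy expects logits as (batch, vocab, seq), hence
+        # the transpose of the (batch, seq, vocab) output.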
+ loss = F.cross_entropy(output.transpose(1, 2), input)
+ acc_loss += loss.item() * input.size(0)
+ nb_samples += input.size(0)
+
+ model.train(t)
+
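+    # Clamp the exponent at 100 so a diverging loss cannot overflow exp().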
+ return math.exp(min(100, acc_loss / nb_samples))
+
+
+######################################################################
+
+
+def one_shot(gpt, task):
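+    # Placeholder: the one-shot evaluation is not implemented yet.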
+ pass
+
+
+######################################################################
+
+
class Task:
def batches(self, split="train"):
pass
##############################
-nb_samples_seen = 0
+if args.one_shot:
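+    # With --one_shot, skip training entirely and only run the evaluation.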
+ one_shot(model, task)
+ exit(0)
+
+##############################
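+# The checkpoint already covers all requested epochs: log the final
+# perplexities, produce the results once, and stop.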
if nb_epochs_finished >= nb_epochs:
-    task.produce_results(nb_epochs_finished, model)
-for n_epoch in range(nb_epochs_finished, nb_epochs):
+    n_epoch = nb_epochs_finished
+    train_perplexity = compute_perplexity(model, split="train")
+    test_perplexity = compute_perplexity(model, split="test")
+
+    log_string(
+        f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
+    )
+
+    task.produce_results(n_epoch, model)
+    exit(0)
+
+##############################
+
+for n_epoch in range(nb_epochs_finished, nb_epochs):
learning_rate = learning_rate_schedule[n_epoch]
log_string(f"learning_rate {learning_rate}")
loss = F.cross_entropy(output.transpose(1, 2), input)
acc_train_loss += loss.item() * input.size(0)
nb_train_samples += input.size(0)
- nb_samples_seen += input.size(0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
- with torch.autograd.no_grad():
-
- model.eval()
-
- nb_test_samples, acc_test_loss = 0, 0.0
-
- for input in task.batches(split="test"):
- input = input.to(device)
-
- output = model(mygpt.BracketedSequence(input)).x
- loss = F.cross_entropy(output.transpose(1, 2), input)
- acc_test_loss += loss.item() * input.size(0)
- nb_test_samples += input.size(0)
-    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
-    test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
-
-    log_string(
-        f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
-    )
-
-    task.produce_results(n_epoch, model)
+    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
+    test_perplexity = compute_perplexity(model, split="test")
+
+    log_string(
+        f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
+    )
+
+    task.produce_results(n_epoch, model)
checkpoint = {
"nb_epochs_finished": n_epoch + 1,