Update.
[picoclvr.git] / main.py
diff --git a/main.py b/main.py
index 5b49468..abed321 100755
--- a/main.py
+++ b/main.py
@@ -14,7 +14,7 @@ import torch, torchvision
 from torch import nn
 from torch.nn import functional as F
 
-import mygpt, tasks, tensorstack
+import mygpt, tasks
 
 ######################################################################
 
@@ -125,7 +125,9 @@ parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.7
 
 parser.add_argument("--expr_nb_variables", type=int, default=5)
 
-parser.add_argument("--expr_sequence_length", type=int, default=30)
+parser.add_argument("--expr_sequence_length", type=int, default=40)
+
+parser.add_argument("--expr_input_file", type=str, default=None)
 
 ######################################################################
 
@@ -170,9 +172,9 @@ default_args = {
         "nb_test_samples": 1000,
     },
     "expr": {
-        "nb_epochs": 50,
+        "nb_epochs": 40,
         "batch_size": 25,
-        "nb_train_samples": 250000,
+        "nb_train_samples": 1000000,
         "nb_test_samples": 10000,
     },
 }
@@ -366,6 +368,20 @@ else:
 
 ######################################################################
 
+if args.task == "expr" and args.expr_input_file is not None:
+    task.produce_results(
+        nb_epochs_finished,
+        model,
+        args.result_dir,
+        log_string,
+        args.deterministic_synthesis,
+        args.expr_input_file,
+    )
+
+    exit(0)
+
+######################################################################
+
 nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
 
 # Compute the entropy of the training tokens
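The hunk above makes main.py act as an evaluation-only run when the "expr" task is given an input file: it calls task.produce_results() with the extra file argument and exits before any training starts. A minimal, self-contained sketch of that guard pattern, with illustrative names (run_eval, train_model) that are not part of picoclvr:

import argparse, sys

parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, default="expr")
parser.add_argument("--expr_input_file", type=str, default=None)
args = parser.parse_args()

def run_eval(input_file):
    # stand-in for task.produce_results(..., args.expr_input_file)
    print(f"evaluating the stored model on expressions from {input_file}")

def train_model():
    # stand-in for the training loop further down in main.py
    print("training as usual")

if args.task == "expr" and args.expr_input_file is not None:
    run_eval(args.expr_input_file)
    sys.exit(0)  # evaluation-only run: skip training entirely

train_model()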
@@ -383,20 +399,28 @@ train_set_perplexity = math.exp(entropy)
 
 
 train_examples = {}
+
 for input in task.batches(split="train"):
-    assert input.dim()==2 and input.dtype==torch.int64
+    assert input.dim() == 2 and input.dtype == torch.int64
     for x in input:
-        train_examples[x.sum().item()]=x
+        train_examples[x.sum().item()] = x
 
+nb_total, nb_collisions = 0, 0
 for input in task.batches(split="test"):
-    assert input.dim()==2 and input.dtype==torch.int64
+    assert input.dim() == 2 and input.dtype == torch.int64
     for x in input:
+        nb_total += 1
         y = train_examples.get(x.sum().item())
         if y is not None:
-            assert x.size() != y.size() or (x-y).abs().sum() > 0
+            if x.size() == y.size() and (x - y).abs().sum() == 0:
+                nb_collisions += 1
 
 del train_examples
 
+log_string(
+    f"data_check {nb_collisions*100/nb_total:.02f}% ({nb_collisions}/{nb_total}) of test samples are in the train set"
+)
+
 ##############################
 
 if args.learning_rate_schedule == "cos":
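The last hunk replaces a hard assertion (the run used to abort as soon as a test sequence matched a stored training sequence) with a collision count reported through log_string. Below is a standalone sketch of the same check, outside picoclvr and with illustrative names only: training sequences are indexed by a cheap key (their token sum), and a test sequence counts as a collision only if it exactly matches the stored representative for that key.

import torch

def count_test_in_train(train_batches, test_batches):
    # index train sequences by token sum, keeping one representative per key
    train_examples = {}
    for input in train_batches:
        assert input.dim() == 2 and input.dtype == torch.int64
        for x in input:
            train_examples[x.sum().item()] = x

    # count test sequences identical to the representative that shares their key
    nb_total, nb_collisions = 0, 0
    for input in test_batches:
        for x in input:
            nb_total += 1
            y = train_examples.get(x.sum().item())
            if y is not None and x.size() == y.size() and (x - y).abs().sum() == 0:
                nb_collisions += 1
    return nb_collisions, nb_total

# toy usage: the first two test rows are copied verbatim from the train batch
train = [torch.randint(0, 10, (8, 5), dtype=torch.int64)]
test = [torch.cat((train[0][:2], torch.randint(0, 10, (6, 5), dtype=torch.int64)))]
nb_collisions, nb_total = count_test_in_train(train, test)
print(f"{nb_collisions*100/nb_total:.02f}% ({nb_collisions}/{nb_total}) of test samples are in the train set")

Since only one representative is kept per key, a genuine duplicate whose token sum collides with a different training sequence can go uncounted; the check is a cheap screen for train/test leakage, not an exact duplicate count.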