Update: rename world/culture quizzes to w_quizzes/c_quizzes, replace --check with --dirty_debug, gate c_quiz creation on the weakest model's accuracy, and steer generation with a reference average logit.
diff --git a/main.py b/main.py
index 79b4b56..ebecad8 100755
--- a/main.py
+++ b/main.py
@@ -14,6 +14,14 @@ from torch.nn import functional as F
 import ffutils
 import mygpt, tasks
 
+# world quizzes vs. culture quizzes
+
+######################################################################
+
+accuracy_to_make_c_quizzes = 0.975
+nb_new_c_quizzes_for_train = 1000
+nb_new_c_quizzes_for_test = 100
+
 ######################################################################
 
 if torch.cuda.is_available():
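Note (my gloss, not part of the patch): the three constants added above gate the whole culture loop, and the --dirty_debug flag introduced in the next hunks collapses them to throwaway values so a full iteration runs quickly. A minimal sketch of the resulting configuration logic, with args stubbed for illustration:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--dirty_debug", action="store_true", default=False)
    args = parser.parse_args([])  # pretend no flags were given

    accuracy_to_make_c_quizzes = 0.975  # min test accuracy of the weakest model
    nb_new_c_quizzes_for_train = 1000   # c_quizzes added to the train set per round
    nb_new_c_quizzes_for_test = 100     # c_quizzes added to the test set per round

    if args.dirty_debug:  # fast, meaningless settings to exercise the code path
        accuracy_to_make_c_quizzes = 0.0
        nb_new_c_quizzes_for_train = 100
        nb_new_c_quizzes_for_test = 10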
@@ -73,7 +81,7 @@ parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
 parser.add_argument("--nb_gpts", type=int, default=5)
 
-parser.add_argument("--check", action="store_true", default=False)
+parser.add_argument("--dirty_debug", action="store_true", default=False)
 
 ######################################################################
 
@@ -84,6 +92,13 @@ if args.result_dir is None:
 ######################################################################
 
+if args.dirty_debug:
+    accuracy_to_make_c_quizzes = 0.0
+    nb_new_c_quizzes_for_train = 100
+    nb_new_c_quizzes_for_test = 10
+
+######################################################################
+
 default_args = {
     "model": "37M",
     "batch_size": 100,
@@ -182,7 +197,7 @@ for n in vars(args):
 ######################################################################
 
-if args.check:
+if args.dirty_debug:
     args.nb_train_samples = 2500
     args.nb_test_samples = 100
@@ -329,43 +344,59 @@ def run_tests(model, task, deterministic_synthesis):
 ######################################################################
 
 
-def create_quizzes(
+def create_c_quizzes(
     model,
     other_models,
     task,
     nb_for_train=1000,
     nb_for_test=100,
+    desired_average_logits=None,
 ):
     kept = []
+    sum_logits, sum_nb_c_quizzes = 0, 0
+
     while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
-        new_quizzes, nb_correct = task.create_new_quizzes(
+        nb_to_generate = 4 * (nb_for_train + nb_for_test)
+
+        new_c_quizzes, nb_correct, average_logits = task.create_c_quizzes(
             n_epoch=n_epoch,
             result_dir=args.result_dir,
             logger=log_string,
-            nb=4 * (nb_for_train + nb_for_test),
+            nb=nb_to_generate,
             model=model,
             other_models=other_models,
+            desired_average_logits=desired_average_logits,
         )
 
-        print(nb_correct)
-
-        to_keep = new_quizzes[nb_correct == len(other_models) - 1]
-        log_string(f"keep {to_keep.size(0)} quizzes")
+        sum_logits += new_c_quizzes.size(0) * average_logits
+        sum_nb_c_quizzes += new_c_quizzes.size(0)
+
+        to_keep = new_c_quizzes[nb_correct == len(other_models) - 1]
+
+        if args.dirty_debug:
+            to_keep = new_c_quizzes
+
+        log_string(
+            f"keep {to_keep.size(0)}/{new_c_quizzes.size(0)} c_quizzes ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%)"
+        )
 
         kept.append(to_keep)
 
-    new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
+    new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
 
-    task.store_new_quizzes(new_quizzes[:nb_for_train], for_train=True)
-    task.store_new_quizzes(new_quizzes[nb_for_train:], for_train=False)
+    task.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
+    task.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
 
-    task.save_image(
-        new_quizzes[:72],
+    task.save_quizzes(
+        new_c_quizzes[:72],
         args.result_dir,
-        f"world_quiz_{n_epoch:04d}_{model.id:02d}.png",
+        f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}",
         log_string,
     )
 
+    return sum_logits / sum_nb_c_quizzes
+
 
 ######################################################################
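A note on the filter in this hunk, as I read the patch: task.create_c_quizzes returns, per candidate quiz, how many of the other models answered it correctly, and a candidate survives only when exactly len(other_models) - 1 of them did. A self-contained sketch with made-up tensors (nb_other_models, the tensor shapes, and the counts are all invented for illustration):

    import torch

    nb_other_models = 4
    new_c_quizzes = torch.arange(15).reshape(5, 3)  # 5 fake quizzes, 3 tokens each
    nb_correct = torch.tensor([4, 3, 3, 1, 0])      # per-quiz count of correct models

    # keep the quizzes that exactly nb_other_models - 1 of the models solved
    to_keep = new_c_quizzes[nb_correct == nb_other_models - 1]
    assert to_keep.size(0) == 2  # only the quizzes with count 3 survive

The value returned by create_c_quizzes, sum_logits / sum_nb_c_quizzes, is then the size-weighted mean of the per-batch average_logits over all generation rounds.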
@@ -394,17 +425,12 @@ log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
 ######################################################################
 
-accuracy_to_make_quizzes = 0.975
-nb_new_quizzes_for_train = 1000
-nb_new_quizzes_for_test = 100
-
-if args.check:
-    accuracy_to_make_quizzes = 0.0
-    nb_new_quizzes_for_train = 10
-    nb_new_quizzes_for_test = 10
+desired_average_logits = None
 
 for n_epoch in range(args.nb_epochs):
-    a = [(model.id, model.main_test_accuracy) for model in models]
+    log_string(f"--- epoch {n_epoch} ----------------------------------------")
+
+    a = [(model.id, float(model.main_test_accuracy)) for model in models]
     a.sort(key=lambda p: p[0])
 
     log_string(f"current accuracies {a}")
@@ -419,25 +445,40 @@ for n_epoch in range(args.nb_epochs):
     # improve it
     one_epoch(model, task)
 
+    task.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
+
     log_string(
-        f"train_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}"
+        f"train_set_composition w_quizzes {task.nb_batch_w_quizzes} c_quizzes {task.nb_batch_c_quizzes}"
     )
 
     # test it
     run_tests(model, task, deterministic_synthesis=False)
 
-    if model.main_test_accuracy >= accuracy_to_make_quizzes:
+    log_string(
+        f"test_set_composition w_quizzes {task.nb_batch_w_quizzes} c_quizzes {task.nb_batch_c_quizzes}"
+    )
+
+    if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
         other_models = models.copy()
         other_models.remove(model)
 
-        create_quizzes(
+        average_logits = create_c_quizzes(
             model,
             other_models,
             task,
-            nb_for_train=nb_new_quizzes_for_train,
-            nb_for_test=nb_new_quizzes_for_test,
+            nb_for_train=nb_new_c_quizzes_for_train,
+            nb_for_test=nb_new_c_quizzes_for_test,
+            desired_average_logits=desired_average_logits,
         )
 
+        # We keep the first average logits as a reference
+        if desired_average_logits is None:
+            desired_average_logits = average_logits
+        else:
+            log_string(
+                f"desired_average_logits {desired_average_logits} average_logits {average_logits}"
+            )
+
         # We update everyone
         for model in models:
             run_tests(model, task, deterministic_synthesis=False)
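The last hunk replaces the per-model accuracy gate with a gate on the weakest model, and pins c_quiz difficulty to the first average logit ever observed. A condensed, runnable sketch of that control flow; create_c_quizzes is stubbed out and the accuracies and logit value are invented for illustration:

    accuracy_to_make_c_quizzes = 0.975
    desired_average_logits = None

    def create_c_quizzes_stub(desired_average_logits=None):
        # stand-in for generation + filtering; returns the batch's mean logit
        return 12.5 if desired_average_logits is None else desired_average_logits

    for n_epoch in range(3):
        accuracies = [0.98, 0.99, 0.976]  # invented per-model test accuracies
        if min(accuracies) >= accuracy_to_make_c_quizzes:
            average_logits = create_c_quizzes_stub(desired_average_logits)
            if desired_average_logits is None:
                # the first batch's average becomes the reference difficulty
                desired_average_logits = average_logits

Gating on min(accuracies) rather than the current model's accuracy means no model can flood the shared quiz pool until every model in the population has caught up.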