# Compute the entropy of the training tokens
token_count = 0
-for input in task.batches(split="train"):
+for input in task.batches(split="train", desc="train-entropy"):
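# (the added desc argument presumably just labels the progress bar shown while iterating over batches)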
token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
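# xlogy(p, p) computes p * log(p) with the 0 log 0 = 0 convention, so entropy
# is the Shannon entropy (in nats) of the empirical token distribution;
# entropy.exp() would give the corresponding per-token perplexity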
yield s  # tail of the subsets_as_tuples helper (definition not shown), used just below
nb_test, nb_in_train = 0, 0
- for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
+ for test_subset in subsets_as_tuples(
+ task.batches(split="test", desc="test-check"), 25000
+ ):
in_train = set()
- for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
+ for train_subset in subsets_as_tuples(
+ task.batches(split="train", desc="train-check"), 25000
+ ):
in_train.update(test_subset.intersection(train_subset))
nb_in_train += len(in_train)
nb_test += len(test_subset)
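# nb_in_train / nb_test is the fraction of test sequences that also occur
# verbatim in the training set; the 25000 chunk size only bounds how many
# tuple-ized sequences each in-memory set holds at once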
progress_bar_desc=None,
)
warnings.warn("keeping thinking snapshots", RuntimeWarning)
- snapshots.append(result[:10].detach().clone())
+ snapshots.append(result[:100].detach().clone())
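# keep a copy of the first 100 sequences at each thinking iteration;
# detach() + clone() make the snapshot independent of later in-place
# updates to result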
# Generate iteration after iteration
# Set the lookahead_reward to UNKNOWN for the next iterations, so they are
# generated without conditioning on a stale reward target
result[
:, u + self.world.index_lookahead_reward
- ] = self.world.lookahead_reward2code(gree.REWARD_UNKNOWN)
+ ] = self.world.lookahead_reward2code(greed.REWARD_UNKNOWN)
filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt")
with open(filename, "w") as f:
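# write one block per sequence, showing how it evolves across the saved snapshots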
- for n in range(10):
+ for n in range(snapshots[0].size(0)):
for s in snapshots:
lr, s, a, r = self.world.seq2episodes(
s[n : n + 1],