X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=mygpt.py;h=b5ac916f1939d0fda01f70931dabb2cb7d0abe60;hb=3c6931f8ddc8160550e026d9e9610ef71260ce10;hp=24ba34591bb6833e9f0a550a4e104074486cec4a;hpb=43fbfaac1850098f5b1a9470c8e6ca3d5ab479fe;p=mygptrnn.git

diff --git a/mygpt.py b/mygpt.py
index 24ba345..b5ac916 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -655,17 +655,14 @@ class Caterpillar(nn.Module):
         self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
         self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
 
-        if self.training and self.proba_flashback:
-            # insert_flash_back(
-            #     self.rec_V,
-            #     V,
-            #     self.rec_K,
-            #     K,
-            #     t0,
-            #     t1,
-            #     CL,
-            #     proba=self.proba_flashback / CL,
-            # )
+        if self.training and self.proba_flashback > 0.0:
+            # insert_flash_back(self.rec_V,V,self.rec_K,K,t0,t1,CL,proba=self.proba_flashback / CL,)
+
+            # This piece of code makes the assumption that there is
+            # nothing informative before t0, otherwise we would have to
+            # implement a cache for V and K too. This should not be
+            # too much of a problem since this is used only during
+            # training, where full sequences are available.
 
             n = torch.arange(N, device=X.device)[:, None, None, None]
             t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
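
For context, the hunk replaces the commented-out insert_flash_back(...) call with an inline version whose first two lines are the torch.arange index tensors visible at the end of the hunk. The snippet below is a minimal, self-contained sketch of what such a flashback step can look like, not the repository's implementation: only the argument list of insert_flash_back and the two index-tensor lines come from the diff, while the (N, H, T, D) tensor layout, the chunk-boundary snapping, and the function body are assumptions made for illustration.

    import torch


    def insert_flash_back(rec_V, V, rec_K, K, t0, t1, CL, proba):
        # Hypothetical layout: rec_V, rec_K, V, K all of shape (N, H, T, D);
        # the actual Caterpillar module uses its own recurrent-state shapes.
        N, H, _, DV = V.size()
        DK = K.size(3)
        L = t1 - t0
        device = V.device

        # Broadcastable index tensors, as in the two context lines at the
        # end of the hunk (X.device replaced by V.device here).
        n = torch.arange(N, device=device)[:, None, None, None]
        t = torch.arange(t0, t1, device=device)[None, None, :, None]

        # For each (sample, head, time step), pick a random earlier time,
        # snapped down to a multiple of the chunk length CL, and a random
        # source head. This relies on the full sequence being present in
        # V / K, i.e. nothing informative is lost before t0.
        src_t = (torch.rand(N, H, L, 1, device=device) * t).long() // CL * CL
        src_h = torch.randint(H, (N, H, L, 1), device=device)

        dv = torch.arange(DV, device=device)[None, None, None, :]
        dk = torch.arange(DK, device=device)[None, None, None, :]

        # With probability `proba`, overwrite the cached entry with the
        # value / key found at the randomly chosen earlier position.
        mask = (torch.rand(N, H, L, 1, device=device) <= proba).long()

        rec_V[:, :, t0:t1] = (
            mask * V[n, src_h, src_t, dv] + (1 - mask) * rec_V[:, :, t0:t1]
        )
        rec_K[:, :, t0:t1] = (
            mask * K[n, src_h, src_t, dk] + (1 - mask) * rec_K[:, :, t0:t1]
        )


    # Toy usage with made-up sizes, purely to exercise the sketch.
    if __name__ == "__main__":
        N, H, T, D, CL = 2, 3, 64, 8, 16
        V, K = torch.randn(N, H, T, D), torch.randn(N, H, T, D)
        rec_V, rec_K = V.clone(), K.clone()
        insert_flash_back(rec_V, V, rec_K, K, t0=16, t1=32, CL=CL, proba=0.1)

The mask draws one Bernoulli sample per (sample, head, time step), so with probability proba the recurrent state in [t0, t1) is overwritten by a value/key taken from a random earlier chunk boundary; as the comment added in the hunk notes, this is only safe during training, where the full sequence is available in V and K.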