X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=mygpt.py;h=b5ac916f1939d0fda01f70931dabb2cb7d0abe60;hb=3c6931f8ddc8160550e026d9e9610ef71260ce10;hp=6e13ff878bae27c818d337a53a413f3ce1a35180;hpb=aa09a883611d6e323b4e8e678b867097fc13afaf;p=mygptrnn.git

diff --git a/mygpt.py b/mygpt.py
index 6e13ff8..b5ac916 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -655,24 +655,15 @@ class Caterpillar(nn.Module):
         self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
         self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
 
-        if self.training and self.proba_flashback:
+        if self.training and self.proba_flashback > 0.0:
+            # insert_flash_back(self.rec_V,V,self.rec_K,K,t0,t1,CL,proba=self.proba_flashback / CL,)
+
             # This piece of code makes the assumption that there is
             # nothing informative before t0, otherwise we'd have to
             # implement a cache for V and K too. This should not be
             # too much of a problem since this is used only during
             # train, where full sequence are available
 
-            # insert_flash_back(
-            #     self.rec_V,
-            #     V,
-            #     self.rec_K,
-            #     K,
-            #     t0,
-            #     t1,
-            #     CL,
-            #     proba=self.proba_flashback / CL,
-            # )
-
             n = torch.arange(N, device=X.device)[:, None, None, None]
             t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
             dv = torch.arange(DV, device=X.device)[None, None, None, :]
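
Note on the hunk above (not part of the patch): the three torch.arange tensors at the end of the context build a broadcastable index grid over (sequence, time step, value dimension), which is what lets the flashback code address every cell of the recurrent cache slice in a single advanced-indexing assignment. Below is a minimal, self-contained sketch of that pattern; the shapes, the rec/src tensors and the Bernoulli mask are hypothetical stand-ins, not the module's actual state, and only the construction of n, t and dv mirrors the lines in the diff.

    # Minimal sketch, not part of the patch: hypothetical shapes and tensors
    # that stand in for the Caterpillar module's state.
    import torch

    N, t0, t1, DV = 2, 4, 8, 3               # hypothetical batch, time slice, value dim
    device = torch.device("cpu")

    rec = torch.zeros(N, 1, 16, DV)          # stand-in for a cache such as self.rec_V
    src = torch.randn(N, 1, 16, DV)          # stand-in for the freshly computed V

    # Index tensors built as in the hunk above; the None insertions give them
    # shapes (N,1,1,1), (1,1,t1-t0,1) and (1,1,1,DV), so advanced indexing
    # broadcasts them to a full (N,1,t1-t0,DV) index grid.
    n = torch.arange(N, device=device)[:, None, None, None]
    t = torch.arange(t0, t1, device=device)[None, None, :, None]
    dv = torch.arange(DV, device=device)[None, None, None, :]

    # Hypothetical per-(sequence, time-step) Bernoulli mask: where it is True,
    # the cache slot is overwritten with the corresponding entry of src.
    mask = torch.rand(N, 1, t1 - t0, 1, device=device) < 0.1
    rec[n, 0, t, dv] = torch.where(mask, src[n, 0, t, dv], rec[n, 0, t, dv])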