import torch

######################################################################

# Given A is NxTx1 and X is NxTxD, expands A and X in place, in O(T)
# operations and O(log(T)) parallel steps if not core-bounded, so that
# the recurrence
#
# Y[:, t] = A[:, t] * Y[:, t-1] + X[:, t], with Y[:, -1] = Y0,
#
# can then be computed for every t directly as
#
# Y[:, t] = A[:, t] * Y0 + X[:, t]
#
# with A and X holding their expanded values.
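#
# For instance, with T = 3, the expansion leaves
#
# A[:, 2] <- A[:, 2] * A[:, 1] * A[:, 0]
# X[:, 2] <- X[:, 2] + A[:, 2] * X[:, 1] + A[:, 2] * A[:, 1] * X[:, 0]
#
# (original values on the right-hand sides), so A[:, 2] * Y0 + X[:, 2] is
# exactly the recurrence unrolled from Y0.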

def expand(A, X):
    if A.size(1) == 1:
        return
    T = 2 * (A.size(1) // 2)
    Aa = A[:, :T].view(A.size(0), T // 2, 2, -1)
    Xa = X[:, :T].view(X.size(0), T // 2, 2, -1)
    # combine each even/odd pair into its odd-position element
    Xa[:, :, 1].add_(Aa[:, :, 1].mul(Xa[:, :, 0]))
    Aa[:, :, 1].mul_(Aa[:, :, 0])
    # recursively expand the half-length sequence of odd positions
    expand(Aa[:, :, 1], Xa[:, :, 1])
    # propagate the expanded odd positions into the following even positions
    Xa[:, 1:, 0].add_(Aa[:, 1:, 0].mul(Xa[:, :-1, 1]))
    Aa[:, 1:, 0].mul_(Aa[:, :-1, 1])
    # if the length is odd, fold in the last, unpaired element
    if T < A.size(1):
        X[:, -1].add_(A[:, -1].mul(X[:, -2]))
        A[:, -1].mul_(A[:, -2])
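
# A minimal sequential reference for expand (an illustrative addition, not
# part of the original module): it implements the same in-place contract in
# T serial steps and can be used to sanity-check the recursive version.

def expand_naive(A, X):
    for t in range(1, A.size(1)):
        X[:, t].add_(A[:, t].mul(X[:, t - 1]))
        A[:, t].mul_(A[:, t - 1])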
######################################################################

# Computes in place Y[:, s] = \sum_{t >= s} X[:, t] (the result overwrites X)

def accrev(X):
    if X.size(1) == 1:
        return
    T = 2 * (X.size(1) // 2)
    Xa = X[:, -T:].view(X.size(0), T // 2, 2, -1)
    Xa[:, :, 0].add_(Xa[:, :, 1])
    accrev(Xa[:, :, 0])
    Xa[:, :-1, 1].add_(Xa[:, 1:, 0])
    # if the length is odd, the leading element still has to be folded in
    if T < X.size(1):
        X[:, 0].add_(X[:, 1])
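
# An equivalent formulation (an illustrative addition, not part of the
# original module): the reverse accumulation is a time-flipped cumulative
# sum, which gives a simple reference to test the recursive version against.

def accrev_naive(X):
    X.copy_(X.flip([1]).cumsum(1).flip([1]))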
######################################################################

class PScan(torch.autograd.Function):
    @staticmethod
    def forward(ctx, A, X, Y0):
        ctx.A = A[:, :, None].clone()
        ctx.Y0 = Y0[:, None, :].clone()
        ctx.A_star = A[:, :, None].clone()
        ctx.X_star = X.clone()
        expand(ctx.A_star, ctx.X_star)
        return ctx.A_star * ctx.Y0 + ctx.X_star

    @staticmethod
    def backward(ctx, grad_output):
        U = grad_output * ctx.A_star
        R = U.clone()
        accrev(R)
        Q = ctx.Y0 / ctx.A
        Q[:, 1:].add_(ctx.X_star[:, :-1] / ctx.A_star[:, 1:])
        # the Y0 gradient is summed over time so that it matches Y0's NxD shape
        return (Q * R).sum(-1), R / ctx.A_star, U.sum(dim=1)

pscan = PScan.apply
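
# A quick gradient check (an illustrative addition, not part of the original
# module): with double-precision inputs, torch.autograd.gradcheck compares the
# analytical backward above with numerical differentiation. A is kept away
# from 0 since the backward divides by A and by its cumulative products.

def check_pscan_gradients(N=2, T=7, D=3):
    A = (torch.rand(N, T, dtype=torch.float64) + 0.5).requires_grad_()
    X = torch.randn(N, T, D, dtype=torch.float64).requires_grad_()
    Y0 = torch.randn(N, D, dtype=torch.float64).requires_grad_()
    return torch.autograd.gradcheck(pscan, (A, X, Y0))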
######################################################################
if __name__ == "__main__":
    A = torch.randn(1, 5, dtype=torch.float64).requires_grad_()
    X = torch.randn(1, 5, 3, dtype=torch.float64).requires_grad_()
    Y0 = torch.randn(1, 3, dtype=torch.float64).requires_grad_()

    # sequential computation of the recurrence
    y = Y0
    for k in range(A.size(1)):
        y = A[:, k, None] * y + X[:, k]
        print(f"{k} -> {y}")

    print(torch.autograd.grad(y.mean(), A, retain_graph=True))
    print(torch.autograd.grad(y.mean(), X, retain_graph=True))
    print(torch.autograd.grad(y.mean(), Y0, retain_graph=True))

    # parallel scan
    Y = pscan(A, X, Y0)

    for k in range(A.size(1)):
        print(f"{k} -> {Y[:,k]}")

    y = Y[:, -1]

    print(torch.autograd.grad(y.mean(), A, retain_graph=True))
    print(torch.autograd.grad(y.mean(), X, retain_graph=True))
    print(torch.autograd.grad(y.mean(), Y0, retain_graph=True))
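
    # An extra numerical comparison (an illustrative addition, not part of the
    # original script): recompute the recurrence sequentially and compare it
    # with the parallel-scan output over all time steps.
    y_seq, seq = Y0, []
    for k in range(A.size(1)):
        y_seq = A[:, k, None] * y_seq + X[:, k]
        seq.append(y_seq)
    print("max |error|:", (Y - torch.stack(seq, dim=1)).abs().max().item())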