parser.add_argument('--ema_decay',
type = float, default = 0.9999,
- help = 'EMA decay, < 0 is no EMA')
+ help = 'EMA decay, <= 0 is no EMA')
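# Keeps an exponential moving average of every model parameter in a shadow
# copy, updated after each optimizer step and copied back into the model
# once training is done.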
def __init__(self, model, decay):
self.model = model
self.decay = decay
- if self.decay < 0: return
- self.ema = { }
+ self.mem = { }
with torch.no_grad():
for p in model.parameters():
- self.ema[p] = p.clone()
+ self.mem[p] = p.clone()
def step(self):
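# In-place shadow update: mem <- decay * mem + (1 - decay) * p.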
- if self.decay < 0: return
with torch.no_grad():
for p in self.model.parameters():
- self.ema[p].copy_(self.decay * self.ema[p] + (1 - self.decay) * p)
+ self.mem[p].copy_(self.decay * self.mem[p] + (1 - self.decay) * p)
- def copy(self):
- if self.decay < 0: return
+ def copy_to_model(self):
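# Replace the live weights with their moving averages (done once, after training).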
with torch.no_grad():
for p in self.model.parameters():
- p.copy_(self.ema[p])
+ p.copy_(self.mem[p])
######################################################################
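# alpha_bar[t] = prod_{s <= t} alpha[s], computed in log space
# (exp of a cumulative sum of logs) to limit accumulated rounding error.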
alpha_bar = alpha.log().cumsum(0).exp()
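# Per-step sampling noise; sigma_t = sqrt(beta_t) is the standard DDPM choice.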
sigma = beta.sqrt()
-ema = EMA(model, decay = args.ema_decay)
+ema = EMA(model, decay = args.ema_decay) if args.ema_decay > 0 else None
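# A non-positive decay disables EMA entirely; the code below tests `ema is not None`.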
for k in range(args.nb_epochs):
x0 = (x0 - train_mean) / train_std
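# One uniform timestep per sample, shaped (B, 1, ..., 1) to broadcast against x0.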
t = torch.randint(T, (x0.size(0),) + (1,) * (x0.dim() - 1), device = x0.device)
eps = torch.randn_like(x0)
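# Closed-form forward diffusion: x_t = sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * eps.
# The timestep, rescaled to [-0.5, 0.5], is appended to x_t as an extra channel.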
- input = torch.sqrt(alpha_bar[t]) * x0 + torch.sqrt(1 - alpha_bar[t]) * eps
- input = torch.cat((input, t.expand_as(x0[:,:1]) / (T - 1) - 0.5), 1)
+ xt = torch.sqrt(alpha_bar[t]) * x0 + torch.sqrt(1 - alpha_bar[t]) * eps
+ input = torch.cat((xt, t.expand_as(x0[:,:1]) / (T - 1) - 0.5), 1)
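# Simplified DDPM objective: MSE between the injected noise and the model's prediction.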
loss = (eps - model(input)).pow(2).mean()
acc_loss += loss.item() * x0.size(0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
- ema.step()
+ if ema is not None: ema.step()
print(f'{k} {acc_loss / train_input.size(0)}')
-ema.copy()
+if ema is not None: ema.copy_to_model()
######################################################################
# Plot