+import math, sys
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+
+class Normalizer(nn.Module):
+    def __init__(self, mu, std):
+        super().__init__()
+        # nn.Parameter, so the optimizer refines the statistics too
+        self.mu = nn.Parameter(mu)
+        self.log_var = nn.Parameter(2 * torch.log(std))
+
+    def forward(self, x):
+        return (x - self.mu) / torch.exp(self.log_var / 2.0)
+
+class SignSTE(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        # torch.sign() is three-valued (-1, 0, +1), so compute a strictly
+        # binary sign instead
+        s = (x >= 0).float() * 2 - 1
+        if self.training:
+            # Straight-through estimator: the value of s with the
+            # gradient of tanh
+            u = torch.tanh(x)
+            return s + u - u.detach()
+        else:
+            return s
+
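+# Sanity check of the straight-through behavior (illustrative snippet, not
+# part of the pipeline):
+#
+#   ste, x = SignSTE().train(), torch.randn(4, requires_grad=True)
+#   ste(x).sum().backward()
+#   # forward values are exactly +/-1, gradient is that of tanh:
+#   assert x.grad.allclose(1 - torch.tanh(x) ** 2)
+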
+
+def train_encoder(
+    train_input,
+    dim_hidden=64,
+    block_size=16,
+    nb_bits_per_block=10,
+    lr_start=1e-3,
+    lr_end=1e-5,
+    nb_epochs=50,
+    batch_size=25,
+    device=torch.device("cpu"),
+):
+    mu, std = train_input.mean(), train_input.std()
+
+    # The encoder maps each non-overlapping block_size x block_size block
+    # of the image to nb_bits_per_block values in {-1, +1}
+    encoder = nn.Sequential(
+        Normalizer(mu, std),
+        nn.Conv2d(3, dim_hidden, kernel_size=5, stride=1, padding=2),
+        nn.ReLU(),
+        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
+        nn.ReLU(),
+        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
+        nn.ReLU(),
+        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
+        nn.ReLU(),
+        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
+        nn.ReLU(),
+        nn.Conv2d(
+            dim_hidden,
+            nb_bits_per_block,
+            kernel_size=block_size,
+            stride=block_size,
+            padding=0,
+        ),
+        SignSTE(),
+    )
+
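+    # With the defaults and, e.g., 32x32 CIFAR-10 images, the encoder maps
+    # a (N, 3, 32, 32) batch to codes of shape (N, 10, 2, 2), that is ten
+    # binary values per 16x16 block, hence 40 bits per image
+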
+    # The decoder maps the nb_bits_per_block-dimensional code of each
+    # block back to a block_size x block_size image block
+    decoder = nn.Sequential(
+        nn.ConvTranspose2d(
+            nb_bits_per_block,
+            dim_hidden,
+            kernel_size=block_size,
+            stride=block_size,
+            padding=0,
+        ),
+        nn.ReLU(),
+        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
+        nn.ReLU(),
+        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
+        nn.ReLU(),
+        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
+        nn.ReLU(),
+        nn.Conv2d(dim_hidden, 3, kernel_size=5, stride=1, padding=2),
+    )
+
+    model = nn.Sequential(encoder, decoder)
+
+    nb_parameters = sum(p.numel() for p in model.parameters())
+
+    print(f"nb_parameters {nb_parameters}")
+
+    model.to(device)
+
+    for k in range(nb_epochs):
+        # Geometric interpolation of the learning rate, from lr_start at
+        # the first epoch to lr_end at the last
+        lr = math.exp(
+            math.log(lr_start) + math.log(lr_end / lr_start) / (nb_epochs - 1) * k
+        )
+        print(f"lr {lr}")
+        # The optimizer is re-created at every epoch to apply the new
+        # learning rate, which also resets Adam's moment estimates
+        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
+        acc_loss, nb_samples = 0.0, 0
+
+        for input in train_input.split(batch_size):
+            input = input.to(device)  # keep the batch on the model's device
+            output = model(input)
+            loss = F.mse_loss(output, input)
+            acc_loss += loss.item() * input.size(0)
+            nb_samples += input.size(0)
+
+            optimizer.zero_grad()
+            loss.backward()
+            optimizer.step()
+
+        print(f"loss {k} {acc_loss/nb_samples}")
+        sys.stdout.flush()
+
+    return encoder, decoder
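+
+
+# Usage sketch (illustrative, assuming train_input is a float tensor of
+# shape (N, 3, H, W) with H and W multiples of block_size):
+#
+#   encoder, decoder = train_encoder(train_input)
+#   with torch.no_grad():
+#       codes = encoder(train_input[:16])  # values in {-1, +1}
+#       reconstructions = decoder(codes)
+#
+# With the defaults this stores 10 bits per 16x16 block of 3-channel
+# pixels, i.e. 10 / (16 * 16 * 3) ~ 0.013 bits per pixel value.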
+
+
+######################################################################
+