X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=world.py;h=43126d5d63466e948317839e727a410e2b267c62;hb=6917d3d52a4b473d31121a471ab98fa114bdb1a6;hp=a93684b0f6797d86a2b8d227abedd3c0b5610672;hpb=4b7407bbbd9636b89f663a6a9124e078a16aaef8;p=culture.git diff --git a/world.py b/world.py index a93684b..43126d5 100755 --- a/world.py +++ b/world.py @@ -1,459 +1,129 @@ #!/usr/bin/env python +# Any copyright is dedicated to the Public Domain. +# https://creativecommons.org/publicdomain/zero/1.0/ + +# Written by Francois Fleuret + import math, sys, tqdm import torch, torchvision from torch import nn from torch.nn import functional as F -import cairo - - -class Box: - nb_rgb_levels = 10 - - def __init__(self, x, y, w, h, r, g, b): - self.x = x - self.y = y - self.w = w - self.h = h - self.r = r - self.g = g - self.b = b - - def collision(self, scene): - for c in scene: - if ( - self is not c - and max(self.x, c.x) <= min(self.x + self.w, c.x + c.w) - and max(self.y, c.y) <= min(self.y + self.h, c.y + c.h) - ): - return True - return False - - -def scene2tensor(xh, yh, scene, size): - width, height = size, size - pixel_map = torch.ByteTensor(width, height, 4).fill_(255) - data = pixel_map.numpy() - surface = cairo.ImageSurface.create_for_data( - data, cairo.FORMAT_ARGB32, width, height - ) - - ctx = cairo.Context(surface) - ctx.set_fill_rule(cairo.FILL_RULE_EVEN_ODD) - - for b in scene: - ctx.move_to(b.x * size, b.y * size) - ctx.rel_line_to(b.w * size, 0) - ctx.rel_line_to(0, b.h * size) - ctx.rel_line_to(-b.w * size, 0) - ctx.close_path() - ctx.set_source_rgba( - b.r / (Box.nb_rgb_levels - 1), - b.g / (Box.nb_rgb_levels - 1), - b.b / (Box.nb_rgb_levels - 1), - 1.0, - ) - ctx.fill() - - hs = size * 0.1 - ctx.set_source_rgba(0.0, 0.0, 0.0, 1.0) - ctx.move_to(xh * size - hs / 2, yh * size - hs / 2) - ctx.rel_line_to(hs, 0) - ctx.rel_line_to(0, hs) - ctx.rel_line_to(-hs, 0) - ctx.close_path() - ctx.fill() - - return ( - pixel_map[None, :, :, :3] - .flip(-1) - .permute(0, 3, 1, 2) - .long() - .mul(Box.nb_rgb_levels) - .floor_divide(256) - ) - - -def random_scene(): - scene = [] - colors = [ - ((Box.nb_rgb_levels - 1), 0, 0), - (0, (Box.nb_rgb_levels - 1), 0), - (0, 0, (Box.nb_rgb_levels - 1)), - ((Box.nb_rgb_levels - 1), (Box.nb_rgb_levels - 1), 0), - ( - (Box.nb_rgb_levels * 2) // 3, - (Box.nb_rgb_levels * 2) // 3, - (Box.nb_rgb_levels * 2) // 3, - ), - ] - - for k in range(10): - wh = torch.rand(2) * 0.2 + 0.2 - xy = torch.rand(2) * (1 - wh) - c = colors[torch.randint(len(colors), (1,))] - b = Box( - xy[0].item(), xy[1].item(), wh[0].item(), wh[1].item(), c[0], c[1], c[2] - ) - if not b.collision(scene): - scene.append(b) - - return scene - - -def generate_episode(nb_steps=10, size=64): - delta = 0.1 - effects = [ - (False, 0, 0), - (False, delta, 0), - (False, 0, delta), - (False, -delta, 0), - (False, 0, -delta), - (True, delta, 0), - (True, 0, delta), - (True, -delta, 0), - (True, 0, -delta), - ] - - while True: - frames = [] - - scene = random_scene() - xh, yh = tuple(x.item() for x in torch.rand(2)) - - frames.append(scene2tensor(xh, yh, scene, size=size)) - - actions = torch.randint(len(effects), (nb_steps,)) - change = False - - for a in actions: - g, dx, dy = effects[a] - if g: - for b in scene: - if b.x <= xh and b.x + b.w >= xh and b.y <= yh and b.y + b.h >= yh: - x, y = b.x, b.y - b.x += dx - b.y += dy - if ( - b.x < 0 - or b.y < 0 - or b.x + b.w > 1 - or b.y + b.h > 1 - or b.collision(scene) - ): - b.x, b.y = x, y - else: - xh += dx - yh += dy - change = True - else: 
- x, y = xh, yh - xh += dx - yh += dy - if xh < 0 or xh > 1 or yh < 0 or yh > 1: - xh, yh = x, y - - frames.append(scene2tensor(xh, yh, scene, size=size)) - - if change: - break - - return frames, actions - ###################################################################### -# ||x_i - c_j||^2 = ||x_i||^2 + ||c_j||^2 - 2 -def sq2matrix(x, c): - nx = x.pow(2).sum(1) - nc = c.pow(2).sum(1) - return nx[:, None] + nc[None, :] - 2 * x @ c.t() - - -def update_centroids(x, c, nb_min=1): - _, b = sq2matrix(x, c).min(1) - b.squeeze_() - nb_resets = 0 - - for k in range(0, c.size(0)): - i = b.eq(k).nonzero(as_tuple=False).squeeze() - if i.numel() >= nb_min: - c[k] = x.index_select(0, i).mean(0) - else: - n = torch.randint(x.size(0), (1,)) - nb_resets += 1 - c[k] = x[n] - - return c, b, nb_resets - - -def kmeans(x, nb_centroids, nb_min=1): - if x.size(0) < nb_centroids * nb_min: - print("Not enough points!") - exit(1) - - c = x[torch.randperm(x.size(0))[:nb_centroids]] - t = torch.full((x.size(0),), -1) - n = 0 - - while True: - c, u, nb_resets = update_centroids(x, c, nb_min) - n = n + 1 - nb_changes = (u - t).sign().abs().sum() + nb_resets - t = u - if nb_changes == 0: - break - - return c, t - - -###################################################################### - - -def patchify(x, factor, invert_size=None): - if invert_size is None: - return ( - x.reshape( - x.size(0), # 0 - x.size(1), # 1 - factor, # 2 - x.size(2) // factor, # 3 - factor, # 4 - x.size(3) // factor, # 5 - ) - .permute(0, 2, 4, 1, 3, 5) - .reshape(-1, x.size(1), x.size(2) // factor, x.size(3) // factor) - ) - else: - return ( - x.reshape( - invert_size[0], # 0 - factor, # 1 - factor, # 2 - invert_size[1], # 3 - invert_size[2] // factor, # 4 - invert_size[3] // factor, # 5 - ) - .permute(0, 3, 1, 4, 2, 5) - .reshape(invert_size) - ) - - -class Normalizer(nn.Module): - def __init__(self, mu, std): - super().__init__() - self.register_buffer("mu", mu) - self.register_buffer("log_var", 2 * torch.log(std)) - - def forward(self, x): - return (x - self.mu) / torch.exp(self.log_var / 2.0) - - -class SignSTE(nn.Module): - def __init__(self): - super().__init__() +colors = torch.tensor( + [ + [255, 255, 255], + [0, 0, 0], + [255, 0, 0], + [0, 128, 0], + [0, 0, 255], + [255, 255, 0], + [192, 192, 192], + ] +) - def forward(self, x): - # torch.sign() takes three values - s = (x >= 0).float() * 2 - 1 - if self.training: - u = torch.tanh(x) - return s + u - u.detach() - else: - return s +token2char = "_X" + "".join([str(n) for n in range(len(colors) - 2)]) + ">" -def train_encoder( - train_input, - test_input, - depth=2, - dim_hidden=48, - nb_bits_per_token=10, - lr_start=1e-3, - lr_end=1e-4, - nb_epochs=10, - batch_size=25, - device=torch.device("cpu"), +def generate( + nb, + height, + width, + max_nb_obj=2, + nb_iterations=2, ): - mu, std = train_input.float().mean(), train_input.float().std() - - def encoder_core(depth, dim): - l = [ - [ - nn.Conv2d( - dim * 2**k, dim * 2**k, kernel_size=5, stride=1, padding=2 - ), - nn.ReLU(), - nn.Conv2d(dim * 2**k, dim * 2 ** (k + 1), kernel_size=2, stride=2), - nn.ReLU(), - ] - for k in range(depth) - ] - - return nn.Sequential(*[x for m in l for x in m]) - - def decoder_core(depth, dim): - l = [ - [ - nn.ConvTranspose2d( - dim * 2 ** (k + 1), dim * 2**k, kernel_size=2, stride=2 - ), - nn.ReLU(), - nn.ConvTranspose2d( - dim * 2**k, dim * 2**k, kernel_size=5, stride=1, padding=2 - ), - nn.ReLU(), - ] - for k in range(depth - 1, -1, -1) - ] - - return nn.Sequential(*[x for m in l for x in m]) - - 
encoder = nn.Sequential( - Normalizer(mu, std), - nn.Conv2d(3, dim_hidden, kernel_size=1, stride=1), - nn.ReLU(), - # 64x64 - encoder_core(depth=depth, dim=dim_hidden), - # 8x8 - nn.Conv2d(dim_hidden * 2**depth, nb_bits_per_token, kernel_size=1, stride=1), - ) - - quantizer = SignSTE() - - decoder = nn.Sequential( - nn.Conv2d(nb_bits_per_token, dim_hidden * 2**depth, kernel_size=1, stride=1), - # 8x8 - decoder_core(depth=depth, dim=dim_hidden), - # 64x64 - nn.ConvTranspose2d(dim_hidden, 3 * Box.nb_rgb_levels, kernel_size=1, stride=1), - ) - - model = nn.Sequential(encoder, decoder) - - nb_parameters = sum(p.numel() for p in model.parameters()) - - print(f"nb_parameters {nb_parameters}") - - model.to(device) - - g5x5 = torch.exp(-torch.tensor([[-2.0, -1.0, 0.0, 1.0, 2.0]]) ** 2 / 2) - g5x5 = (g5x5.t() @ g5x5).view(1, 1, 5, 5) - g5x5 = g5x5 / g5x5.sum() - - for k in range(nb_epochs): - lr = math.exp( - math.log(lr_start) + math.log(lr_end / lr_start) / (nb_epochs - 1) * k - ) - optimizer = torch.optim.Adam(model.parameters(), lr=lr) - - acc_train_loss = 0.0 - - for input in train_input.split(batch_size): - z = encoder(input) - zq = z if k < 1 else quantizer(z) - output = decoder(zq) - - output = output.reshape( - output.size(0), -1, 3, output.size(2), output.size(3) - ) - - train_loss = F.cross_entropy(output, input) - - acc_train_loss += train_loss.item() * input.size(0) - - optimizer.zero_grad() - train_loss.backward() - optimizer.step() - - acc_test_loss = 0.0 - - for input in test_input.split(batch_size): - z = encoder(input) - zq = z if k < 1 else quantizer(z) - output = decoder(zq) - - output = output.reshape( - output.size(0), -1, 3, output.size(2), output.size(3) + f_start = torch.zeros(nb, height, width, dtype=torch.int64) + f_end = torch.zeros(nb, height, width, dtype=torch.int64) + n = torch.arange(f_start.size(0)) + + for n in range(nb): + nb_fish = torch.randint(max_nb_obj, (1,)).item() + 1 + for c in torch.randperm(colors.size(0) - 2)[:nb_fish].sort().values: + i, j = ( + torch.randint(height - 2, (1,))[0] + 1, + torch.randint(width - 2, (1,))[0] + 1, ) + vm = torch.randint(4, (1,))[0] + vi, vj = (vm // 2) * (2 * (vm % 2) - 1), (1 - vm // 2) * (2 * (vm % 2) - 1) + + f_start[n, i, j] = c + 2 + f_start[n, i - vi, j - vj] = c + 2 + f_start[n, i + vj, j - vi] = c + 2 + f_start[n, i - vj, j + vi] = c + 2 + + for l in range(nb_iterations): + i += vi + j += vj + if i < 0 or i >= height or j < 0 or j >= width: + i -= vi + j -= vj + vi, vj = -vi, -vj + i += vi + j += vj + + f_end[n, i, j] = c + 2 + f_end[n, i - vi, j - vj] = c + 2 + f_end[n, i + vj, j - vi] = c + 2 + f_end[n, i - vj, j + vi] = c + 2 + + return torch.cat( + [ + f_end.flatten(1), + torch.full((f_end.size(0), 1), len(colors)), + f_start.flatten(1), + ], + dim=1, + ) - test_loss = F.cross_entropy(output, input) - - acc_test_loss += test_loss.item() * input.size(0) - - train_loss = acc_train_loss / train_input.size(0) - test_loss = acc_test_loss / test_input.size(0) - - print(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}") - sys.stdout.flush() - - return encoder, quantizer, decoder - -def generate_episodes(nb): - all_frames = [] - for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"): - frames, actions = generate_episode(nb_steps=31) - all_frames += [ frames[0], frames[-1] ] - return torch.cat(all_frames, 0).contiguous() - -def create_data_and_processors(nb_train_samples, nb_test_samples): - train_input = generate_episodes(nb_train_samples) - test_input = 
generate_episodes(nb_test_samples) - encoder, quantizer, decoder = train_encoder(train_input, test_input, nb_epochs=2) - - input = test_input[:64] - - z = encoder(input.float()) - height, width = z.size(2), z.size(3) - zq = quantizer(z).long() - pow2=(2**torch.arange(zq.size(1), device=zq.device))[None,None,:] - seq = (zq.permute(0,2,3,1).clamp(min=0).reshape(zq.size(0),-1,zq.size(1)) * pow2).sum(-1) - print(f"{seq.size()=}") - - ZZ=zq - zq = ((seq[:,:,None] // pow2)%2)*2-1 - zq = zq.reshape(zq.size(0), height, width, -1).permute(0,3,1,2) +def sample2img(seq, height, width): + f_start = seq[:, : height * width].reshape(-1, height, width) + f_start = (f_start >= len(colors)).long() + (f_start < len(colors)).long() * f_start + f_end = seq[:, height * width + 1 :].reshape(-1, height, width) + f_end = (f_end >= len(colors)).long() + (f_end < len(colors)).long() * f_end - print(ZZ[0]) - print(zq[0]) + img_f_start, img_f_end = colors[f_start], colors[f_end] - print("CHECK", (ZZ-zq).abs().sum()) + img = torch.cat( + [ + img_f_start, + torch.full( + (img_f_start.size(0), img_f_start.size(1), 1, img_f_start.size(3)), 1 + ), + img_f_end, + ], + dim=2, + ) - results = decoder(zq.float()) - T = 0.1 - results = results.reshape( - results.size(0), -1, 3, results.size(2), results.size(3) - ).permute(0, 2, 3, 4, 1) - results = torch.distributions.categorical.Categorical(logits=results / T).sample() + return img.permute(0, 3, 1, 2) - torchvision.utils.save_image( - input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=8 - ) - - torchvision.utils.save_image( - results.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=8 - ) +def seq2str(seq): + result = [] + for s in seq: + result.append("".join([token2char[v] for v in s])) + return result ###################################################################### if __name__ == "__main__": - create_data_and_processors(250,100) + import time - # train_input = generate_episodes(2500) - # test_input = generate_episodes(1000) + height, width = 6, 8 + start_time = time.perf_counter() + seq = generate(nb=64, height=height, width=width, max_nb_obj=3) + delay = time.perf_counter() - start_time + print(f"{seq.size(0)/delay:02f} samples/s") - # encoder, quantizer, decoder = train_encoder(train_input, test_input) + print(seq2str(seq[:4])) - # input = test_input[torch.randperm(test_input.size(0))[:64]] - # z = encoder(input.float()) - # zq = quantizer(z) - # results = decoder(zq) + img = sample2img(seq, height, width) + print(img.size()) - # T = 0.1 - # results = torch.distributions.categorical.Categorical(logits=results / T).sample() + torchvision.utils.save_image(img.float() / 255.0, "world.png", nrow=8, padding=2)
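
A minimal usage sketch of the new interface, not part of the commit above: it assumes the post-change world.py is importable as `world`, and only exercises the token layout that generate() produces, i.e. each sample is a flattened frame, a single separator token equal to len(colors), then a second flattened frame (generate() places f_end, the frame after nb_iterations of motion, first, and f_start second).

# Hypothetical sanity check of the sequence layout produced by world.generate().
# Assumes the new world.py shown in the diff above is importable as `world`.
import world

height, width = 6, 8
seq = world.generate(nb=4, height=height, width=width, max_nb_obj=3)

# Each sample: two flattened frames plus one separator token in between.
assert seq.size(1) == 2 * height * width + 1

frame_a = seq[:, : height * width].reshape(-1, height, width)
separator = seq[:, height * width]
frame_b = seq[:, height * width + 1 :].reshape(-1, height, width)

# The separator token is the index just past the color table.
assert (separator == len(world.colors)).all()

# sample2img() renders the two frames side by side with a one-pixel divider column.
img = world.sample2img(seq, height, width)
print(img.size())  # torch.Size([4, 3, 6, 17]) for height=6, width=8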