diff --git a/world.py b/world.py
index fb5d5c7..118a470 100755
--- a/world.py
+++ b/world.py
@@ -1,259 +1,129 @@
 #!/usr/bin/env python
 
-import math
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+# Written by Francois Fleuret <francois@fleuret.org>
+
+import math, sys, tqdm
 
 import torch, torchvision
 
 from torch import nn
 from torch.nn import functional as F
 
-import cairo
-
-
-class Box:
-    def __init__(self, x, y, w, h, r, g, b):
-        self.x = x
-        self.y = y
-        self.w = w
-        self.h = h
-        self.r = r
-        self.g = g
-        self.b = b
-
-    def collision(self, scene):
-        for c in scene:
-            if (
-                self is not c
-                and max(self.x, c.x) <= min(self.x + self.w, c.x + c.w)
-                and max(self.y, c.y) <= min(self.y + self.h, c.y + c.h)
-            ):
-                return True
-        return False
-
-
-def scene2tensor(xh, yh, scene, size=64):
-    width, height = size, size
-    pixel_map = torch.ByteTensor(width, height, 4).fill_(255)
-    data = pixel_map.numpy()
-    surface = cairo.ImageSurface.create_for_data(
-        data, cairo.FORMAT_ARGB32, width, height
-    )
-
-    ctx = cairo.Context(surface)
-    ctx.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)
-
-    for b in scene:
-        ctx.move_to(b.x * size, b.y * size)
-        ctx.rel_line_to(b.w * size, 0)
-        ctx.rel_line_to(0, b.h * size)
-        ctx.rel_line_to(-b.w * size, 0)
-        ctx.close_path()
-        ctx.set_source_rgba(b.r, b.g, b.b, 1.0)
-        ctx.fill()
-
-    hs = size * 0.1
-    ctx.set_source_rgba(0.0, 0.0, 0.0, 1.0)
-    ctx.move_to(xh * size - hs / 2, yh * size - hs / 2)
-    ctx.rel_line_to(hs, 0)
-    ctx.rel_line_to(0, hs)
-    ctx.rel_line_to(-hs, 0)
-    ctx.close_path()
-    ctx.fill()
-
-    return pixel_map[None, :, :, :3].flip(-1).permute(0, 3, 1, 2).float() / 255
-
-
-def random_scene():
-    scene = []
-    colors = [
-        (1.00, 0.00, 0.00),
-        (0.00, 1.00, 0.00),
-        (0.60, 0.60, 1.00),
-        (1.00, 1.00, 0.00),
-        (0.75, 0.75, 0.75),
-    ]
-
-    for k in range(10):
-        wh = torch.rand(2) * 0.2 + 0.2
-        xy = torch.rand(2) * (1 - wh)
-        c = colors[torch.randint(len(colors), (1,))]
-        b = Box(
-            xy[0].item(), xy[1].item(), wh[0].item(), wh[1].item(), c[0], c[1], c[2]
-        )
-        if not b.collision(scene):
-            scene.append(b)
-
-    return scene
-
-
-def sequence(nb_steps=10, all_frames=False):
-    delta = 0.1
-    effects = [
-        (False, 0, 0),
-        (False, delta, 0),
-        (False, 0, delta),
-        (False, -delta, 0),
-        (False, 0, -delta),
-        (True, delta, 0),
-        (True, 0, delta),
-        (True, -delta, 0),
-        (True, 0, -delta),
-    ]
-
-    while True:
-        frames = []
-
-        scene = random_scene()
-        xh, yh = tuple(x.item() for x in torch.rand(2))
-
-        frames.append(scene2tensor(xh, yh, scene))
-
-        actions = torch.randint(len(effects), (nb_steps,))
-        change = False
-
-        for a in actions:
-            g, dx, dy = effects[a]
-            if g:
-                for b in scene:
-                    if b.x <= xh and b.x + b.w >= xh and b.y <= yh and b.y + b.h >= yh:
-                        x, y = b.x, b.y
-                        b.x += dx
-                        b.y += dy
-                        if (
-                            b.x < 0
-                            or b.y < 0
-                            or b.x + b.w > 1
-                            or b.y + b.h > 1
-                            or b.collision(scene)
-                        ):
-                            b.x, b.y = x, y
-                        else:
-                            xh += dx
-                            yh += dy
-                            change = True
-            else:
-                x, y = xh, yh
-                xh += dx
-                yh += dy
-                if xh < 0 or xh > 1 or yh < 0 or yh > 1:
-                    xh, yh = x, y
-
-            if all_frames:
-                frames.append(scene2tensor(xh, yh, scene))
-
-        if not all_frames:
-            frames.append(scene2tensor(xh, yh, scene))
-
-        if change:
-            break
-
-    return frames, actions
-
 
 ######################################################################
-# ||x_i - c_j||^2 = ||x_i||^2 + ||c_j||^2 - 2 <x_i, c_j>
-def sq2matrix(x, c):
-    nx = x.pow(2).sum(1)
-    nc = c.pow(2).sum(1)
-    return nx[:, None] + nc[None, :] - 2 * x @ c.t()
-
-
-def update_centroids(x, c, nb_min=1):
-    _, b = sq2matrix(x, c).min(1)
-    b.squeeze_()
-    nb_resets = 0
+colors = torch.tensor(
+    [
+        [255, 255, 255],
+        [0, 0, 0],
+        [255, 0, 0],
+        [0, 128, 0],
+        [0, 0, 255],
+        [255, 200, 0],
+        [192, 192, 192],
+    ]
+)
+
+token2char = "_X" + "".join([str(n) for n in range(len(colors) - 2)]) + ">"
+
+
+def generate(
+    nb,
+    height,
+    width,
+    max_nb_obj=2,
+    nb_iterations=2,
+):
+    f_start = torch.zeros(nb, height, width, dtype=torch.int64)
+    f_end = torch.zeros(nb, height, width, dtype=torch.int64)
+
+    for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world generation"):
+        nb_fish = torch.randint(max_nb_obj, (1,)).item() + 1  # 1 .. max_nb_obj objects
+        # pick distinct object colors (skipping white and black)
+        for c in torch.randperm(colors.size(0) - 2)[:nb_fish].sort().values:
+            i, j = (
+                torch.randint(height - 2, (1,))[0] + 1,
+                torch.randint(width - 2, (1,))[0] + 1,
+            )
+            # one of the four axis-aligned unit velocities
+            vm = torch.randint(4, (1,))[0]
+            vi, vj = (vm // 2) * (2 * (vm % 2) - 1), (1 - vm // 2) * (2 * (vm % 2) - 1)
+
+            # draw the object at its initial position: center, tail, two sides
+            f_start[n, i, j] = c + 2
+            f_start[n, i - vi, j - vj] = c + 2
+            f_start[n, i + vj, j - vi] = c + 2
+            f_start[n, i - vj, j + vi] = c + 2
+
+            # move the center, bouncing off the walls
+            for l in range(nb_iterations):
+                i += vi
+                j += vj
+                if i < 0 or i >= height or j < 0 or j >= width:
+                    i -= vi
+                    j -= vj
+                    vi, vj = -vi, -vj
+                    i += vi
+                    j += vj
+
+            f_end[n, i, j] = c + 2
+            f_end[n, i - vi, j - vj] = c + 2
+            f_end[n, i + vj, j - vi] = c + 2
+            f_end[n, i - vj, j + vi] = c + 2
+
+    # each sample is the final frame, a separator token, then the initial frame
+    return torch.cat(
+        [
+            f_end.flatten(1),
+            torch.full((f_end.size(0), 1), len(colors)),
+            f_start.flatten(1),
+        ],
+        dim=1,
+    )
 
-    for k in range(0, c.size(0)):
-        i = b.eq(k).nonzero(as_tuple=False).squeeze()
-        if i.numel() >= nb_min:
-            c[k] = x.index_select(0, i).mean(0)
-        else:
-            n = torch.randint(x.size(0), (1,))
-            nb_resets += 1
-            c[k] = x[n]
 
-    return c, b, nb_resets
+def sample2img(seq, height, width):
+    # generate() packs the final frame first: [f_end | separator | f_start];
+    # tokens outside the color table are mapped to 1 (black)
+    f_end = seq[:, : height * width].reshape(-1, height, width)
+    f_end = (f_end >= len(colors)).long() + (f_end < len(colors)).long() * f_end
+    f_start = seq[:, height * width + 1 :].reshape(-1, height, width)
+    f_start = (f_start >= len(colors)).long() + (f_start < len(colors)).long() * f_start
 
+    img_f_start, img_f_end = colors[f_start], colors[f_end]
 
-def kmeans(x, nb_centroids, nb_min=1):
-    if x.size(0) < nb_centroids * nb_min:
-        print("Not enough points!")
-        exit(1)
+    # render the two frames side by side, separated by a one-pixel column
+    img = torch.cat(
+        [
+            img_f_end,
+            torch.full(
+                (img_f_end.size(0), img_f_end.size(1), 1, img_f_end.size(3)), 1
+            ),
+            img_f_start,
+        ],
+        dim=2,
+    )
 
-    c = x[torch.randperm(x.size(0))[:nb_centroids]]
-    t = torch.full((x.size(0),), -1)
-    n = 0
+    return img.permute(0, 3, 1, 2)
 
-    while True:
-        c, u, nb_resets = update_centroids(x, c, nb_min)
-        n = n + 1
-        nb_changes = (u - t).sign().abs().sum() + nb_resets
-        t = u
-        if nb_changes == 0:
-            break
 
-    return c, t
+def seq2str(seq):
+    result = []
+    for s in seq:
+        result.append("".join([token2char[v] for v in s]))
+    return result
 
 
 ######################################################################
 
-
-def patchify(x, factor, invert_size=None):
-    if invert_size is None:
-        return (
-            x.reshape(
-                x.size(0),  # 0
-                x.size(1),  # 1
-                factor,  # 2
-                x.size(2) // factor,  # 3
-                factor,  # 4
-                x.size(3) // factor,  # 5
-            )
-            .permute(0, 2, 4, 1, 3, 5)
-            .reshape(-1, x.size(1), x.size(2) // factor, x.size(3) // factor)
-        )
-    else:
-        return (
-            x.reshape(
-                invert_size[0],  # 0
-                factor,  # 1
-                factor,  # 2
-                invert_size[1],  # 3
-                invert_size[2] // factor,  # 4
-                invert_size[3] // factor,  # 5
-            )
-            .permute(0, 3, 1, 4, 2, 5)
-            .reshape(invert_size)
-        )
-
-
 if __name__ == "__main__":
     import time
 
-    all_frames = []
-    nb = 1000
+    height, width = 6, 8
 
     start_time = time.perf_counter()
-    for n in range(nb):
-        frames, actions = sequence(nb_steps=31)
-        all_frames += frames
-    end_time = time.perf_counter()
-    print(f"{nb / (end_time - start_time):.02f} samples per second")
-
-    input = torch.cat(all_frames, 0)
-    x = patchify(input, 8)
-    y = x.reshape(x.size(0), -1)
-    print(f"{x.size()=} {y.size()=}")
-    centroids, t = kmeans(y, 4096)
-    results = centroids[t]
-    results = results.reshape(x.size())
-    results = patchify(results, 8, input.size())
+    seq = generate(nb=64, height=height, width=width, max_nb_obj=3)
+    delay = time.perf_counter() - start_time
+    print(f"{seq.size(0) / delay:.02f} samples/s")
 
-    print(f"{input.size()=} {results.size()=}")
+    print(seq2str(seq[:4]))
 
-    torchvision.utils.save_image(input[:64], "orig.png", nrow=8)
-    torchvision.utils.save_image(results[:64], "qtiz.png", nrow=8)
+    img = sample2img(seq, height, width)
+    print(img.size())
 
-    # frames, actions = sequence(nb_steps=31, all_frames=True)
-    # frames = torch.cat(frames, 0)
-    # torchvision.utils.save_image(frames, "seq.png", nrow=8)
+    torchvision.utils.save_image(img.float() / 255.0, "world.png", nrow=8, padding=2)
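
Each generated sample is a flat token sequence laid out as [f_end | separator | f_start], with the separator token equal to len(colors). Below is a minimal sketch, assuming the patched world.py is importable from the working directory, that splits one sequence back into its two grids and prints them with token2char:

# A sketch, not part of the patch: decode one sequence produced by
# generate() back into its two frames, assuming the
# [f_end | separator | f_start] layout described above.
from world import generate, token2char, colors

height, width = 6, 8
seq = generate(nb=1, height=height, width=width)[0]

f_end = seq[: height * width].reshape(height, width)
assert seq[height * width].item() == len(colors)  # separator token
f_start = seq[height * width + 1 :].reshape(height, width)

for name, grid in (("start", f_start), ("end", f_end)):
    print(name)
    for row in grid:
        print("".join(token2char[v] for v in row))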
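
The same layout fixes the sizes a downstream model has to handle: a vocabulary of len(colors) + 1 = 8 tokens and, for the 6x8 grids used in __main__, sequences of length 2 * 6 * 8 + 1 = 97. A quick sanity check of those shapes, under the same import assumption:

# A sketch, not part of the patch: check the shapes implied by the encoding.
from world import generate, sample2img, colors

height, width = 6, 8
seq = generate(nb=4, height=height, width=width)

assert seq.size(1) == 2 * height * width + 1  # two frames plus one separator
assert int(seq.max()) <= len(colors)          # tokens span 0 .. len(colors)

img = sample2img(seq, height, width)
# two frames rendered side by side with a one-pixel divider column
assert img.size() == (4, 3, height, 2 * width + 1)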