From: François Fleuret Date: Tue, 25 Jun 2024 13:37:47 +0000 (+0200) Subject: Update. X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=commitdiff_plain;h=6dbc18a5db82b12b06212841426896412e8bd6de;p=culture.git Update. --- diff --git a/quizz_machine.py b/quizz_machine.py index daa8a54..43fd868 100755 --- a/quizz_machine.py +++ b/quizz_machine.py @@ -64,28 +64,12 @@ def masked_inplace_autoregression( model.train(t) -###################################################################### - - -class Task: - def batches(self, split="train", nb_to_use=-1, desc=None): - pass - - def vocabulary_size(self): - pass - - def produce_results( - self, n_epoch, model, result_dir, logger, deterministic_synthesis - ): - pass - - ###################################################################### import sky -class QuizzMachine(Task): +class QuizzMachine: def save_image(self, input, result_dir, filename, logger): img = sky.seq2img(input.to("cpu"), self.height, self.width) image_name = os.path.join(result_dir, filename) diff --git a/sky.py b/sky.py index 36aa1e9..3458d85 100755 --- a/sky.py +++ b/sky.py @@ -40,267 +40,284 @@ token_backward = token_forward + 1 token2char = "_" + "".join([chr(ord("A") + n) for n in range(len(colors) - 1)]) + "><" -def generate_seq( - nb, height, width, nb_birds=3, nb_iterations=2, return_iterations=False -): - pairs = [] - kept_iterations = [] +class Sky: + def __init__(self, height, width): + self.heigh = heigh + self.width = width - for _ in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world generation"): - while True: - iterations = [] + def generate_seq( + nb, height, width, nb_birds=3, nb_iterations=2, return_iterations=False + ): + pairs = [] + kept_iterations = [] - f_start = torch.zeros(height, width, dtype=torch.int64) + for _ in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world generation"): + while True: + iterations = [] + + f_start = torch.zeros(height, width, dtype=torch.int64) + + i, j, vi, vj = ( + torch.empty(nb_birds, dtype=torch.int64), + torch.empty(nb_birds, dtype=torch.int64), + torch.empty(nb_birds, dtype=torch.int64), + torch.empty(nb_birds, dtype=torch.int64), + ) + + col = torch.randperm(colors.size(0) - 1)[:nb_birds].sort().values + 1 - i, j, vi, vj = ( - torch.empty(nb_birds, dtype=torch.int64), - torch.empty(nb_birds, dtype=torch.int64), - torch.empty(nb_birds, dtype=torch.int64), - torch.empty(nb_birds, dtype=torch.int64), - ) - - col = torch.randperm(colors.size(0) - 1)[:nb_birds].sort().values + 1 - - for n in range(nb_birds): - c = col[n] - - while True: - i[n], j[n] = ( - torch.randint(height, (1,))[0], - torch.randint(width, (1,))[0], - ) - vm = torch.randint(4, (1,))[0] - vi[n], vj[n] = (vm % 2) * 2 - 1, (vm // 2) * 2 - 1 - if ( - i[n] - vi[n] >= 0 - and i[n] - vi[n] < height - and j[n] - vj[n] >= 0 - and j[n] - vj[n] < width - and f_start[i[n], j[n]] == 0 - and f_start[i[n] - vi[n], j[n]] == 0 - and f_start[i[n], j[n] - vj[n]] == 0 - ): - break - - f_start[i[n], j[n]] = c - f_start[i[n] - vi[n], j[n]] = c - f_start[i[n], j[n] - vj[n]] = c - - f_end = f_start.clone() - - for l in range(nb_iterations): - iterations.append(f_end.clone()) - f_end[...] 
= 0 - nb_collisions = 0 for n in range(nb_birds): c = col[n] - pi, pj, pvi, pvj = ( - i[n].item(), - j[n].item(), - vi[n].item(), - vj[n].item(), - ) - - if (i[n] == 0 and vi[n] == -1) or ( - i[n] == height - 1 and vi[n] == 1 - ): - vi[n] = -vi[n] - if (j[n] == 0 and vj[n] == -1) or ( - j[n] == width - 1 and vj[n] == 1 - ): - vj[n] = -vj[n] - - i[n] += vi[n] - j[n] += vj[n] - - if not ( - f_end[i[n], j[n]] == 0 - and f_end[i[n] - vi[n], j[n]] == 0 - and f_end[i[n], j[n] - vj[n]] == 0 - ): - nb_collisions += 1 - - f_end[i[n], j[n]] = c - f_end[i[n] - vi[n], j[n]] = c - f_end[i[n], j[n] - vj[n]] = c - - iterations.append(f_end.clone()) - - if nb_collisions == 0: - break - - kept_iterations.append(iterations) - pairs.append((f_start, f_end)) - - result = [] - for p in pairs: - if torch.rand(1) < 0.5: - result.append( - torch.cat( - [p[0].flatten(), torch.tensor([token_forward]), p[1].flatten()], - dim=0, - )[None, :] - ) - else: - result.append( - torch.cat( - [p[1].flatten(), torch.tensor([token_backward]), p[0].flatten()], - dim=0, - )[None, :] - ) - - if return_iterations: - # iterations = torch.cat([ torch.cat([ x[None, None] for x in l], dim = 1) for l in kept_iterations ], dim=0) - return torch.cat(result, dim=0), kept_iterations - else: - return torch.cat(result, dim=0) + while True: + i[n], j[n] = ( + torch.randint(height, (1,))[0], + torch.randint(width, (1,))[0], + ) + vm = torch.randint(4, (1,))[0] + vi[n], vj[n] = (vm % 2) * 2 - 1, (vm // 2) * 2 - 1 + if ( + i[n] - vi[n] >= 0 + and i[n] - vi[n] < height + and j[n] - vj[n] >= 0 + and j[n] - vj[n] < width + and f_start[i[n], j[n]] == 0 + and f_start[i[n] - vi[n], j[n]] == 0 + and f_start[i[n], j[n] - vj[n]] == 0 + ): + break + + f_start[i[n], j[n]] = c + f_start[i[n] - vi[n], j[n]] = c + f_start[i[n], j[n] - vj[n]] = c + + f_end = f_start.clone() + + for l in range(nb_iterations): + iterations.append(f_end.clone()) + f_end[...] 
= 0 + nb_collisions = 0 + for n in range(nb_birds): + c = col[n] + + pi, pj, pvi, pvj = ( + i[n].item(), + j[n].item(), + vi[n].item(), + vj[n].item(), + ) + + if (i[n] == 0 and vi[n] == -1) or ( + i[n] == height - 1 and vi[n] == 1 + ): + vi[n] = -vi[n] + if (j[n] == 0 and vj[n] == -1) or ( + j[n] == width - 1 and vj[n] == 1 + ): + vj[n] = -vj[n] + + i[n] += vi[n] + j[n] += vj[n] + + if not ( + f_end[i[n], j[n]] == 0 + and f_end[i[n] - vi[n], j[n]] == 0 + and f_end[i[n], j[n] - vj[n]] == 0 + ): + nb_collisions += 1 + + f_end[i[n], j[n]] = c + f_end[i[n] - vi[n], j[n]] = c + f_end[i[n], j[n] - vj[n]] = c + iterations.append(f_end.clone()) -###################################################################### + if nb_collisions == 0: + break + + kept_iterations.append(iterations) + pairs.append((f_start, f_end)) + + result = [] + for p in pairs: + if torch.rand(1) < 0.5: + result.append( + torch.cat( + [p[0].flatten(), torch.tensor([token_forward]), p[1].flatten()], + dim=0, + )[None, :] + ) + else: + result.append( + torch.cat( + [ + p[1].flatten(), + torch.tensor([token_backward]), + p[0].flatten(), + ], + dim=0, + )[None, :] + ) + + if return_iterations: + # iterations = torch.cat([ torch.cat([ x[None, None] for x in l], dim = 1) for l in kept_iterations ], dim=0) + return torch.cat(result, dim=0), kept_iterations + else: + return torch.cat(result, dim=0) + + ###################################################################### + def generate_seq_old( + nb, + height, + width, + nb_birds=3, + nb_iterations=2, + ): + pairs = [] -def generate_seq_old( - nb, - height, - width, - nb_birds=3, - nb_iterations=2, -): - pairs = [] - - for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world generation"): - f_start = torch.zeros(height, width, dtype=torch.int64) - f_end = torch.zeros(height, width, dtype=torch.int64) - n = torch.arange(f_start.size(0)) - - for c in ( - (torch.randperm(nb_bird_tokens) + first_bird_token)[:nb_birds].sort().values - ): - i, j = ( - torch.randint(height - 2, (1,))[0] + 1, - torch.randint(width - 2, (1,))[0] + 1, - ) - vm = torch.randint(4, (1,))[0] - vi, vj = (vm // 2) * (2 * (vm % 2) - 1), (1 - vm // 2) * (2 * (vm % 2) - 1) - - f_start[i, j] = c - f_start[i - vi, j - vj] = c - f_start[i + vj, j - vi] = c - f_start[i - vj, j + vi] = c - - for l in range(nb_iterations): - i += vi - j += vj - if i < 0 or i >= height or j < 0 or j >= width: - i -= vi - j -= vj - vi, vj = -vi, -vj + for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world generation"): + f_start = torch.zeros(height, width, dtype=torch.int64) + f_end = torch.zeros(height, width, dtype=torch.int64) + n = torch.arange(f_start.size(0)) + + for c in ( + (torch.randperm(nb_bird_tokens) + first_bird_token)[:nb_birds] + .sort() + .values + ): + i, j = ( + torch.randint(height - 2, (1,))[0] + 1, + torch.randint(width - 2, (1,))[0] + 1, + ) + vm = torch.randint(4, (1,))[0] + vi, vj = (vm // 2) * (2 * (vm % 2) - 1), (1 - vm // 2) * ( + 2 * (vm % 2) - 1 + ) + + f_start[i, j] = c + f_start[i - vi, j - vj] = c + f_start[i + vj, j - vi] = c + f_start[i - vj, j + vi] = c + + for l in range(nb_iterations): i += vi j += vj + if i < 0 or i >= height or j < 0 or j >= width: + i -= vi + j -= vj + vi, vj = -vi, -vj + i += vi + j += vj + + f_end[i, j] = c + f_end[i - vi, j - vj] = c + f_end[i + vj, j - vi] = c + f_end[i - vj, j + vi] = c + + pairs.append((f_start, f_end)) + + result = [] + for p in pairs: + if torch.rand(1) < 0.5: + result.append( + torch.cat( + [p[0].flatten(), torch.tensor([token_forward]), 
p[1].flatten()], + dim=0, + )[None, :] + ) + else: + result.append( + torch.cat( + [ + p[1].flatten(), + torch.tensor([token_backward]), + p[0].flatten(), + ], + dim=0, + )[None, :] + ) - f_end[i, j] = c - f_end[i - vi, j - vj] = c - f_end[i + vj, j - vi] = c - f_end[i - vj, j + vi] = c - - pairs.append((f_start, f_end)) - - result = [] - for p in pairs: - if torch.rand(1) < 0.5: - result.append( - torch.cat( - [p[0].flatten(), torch.tensor([token_forward]), p[1].flatten()], - dim=0, - )[None, :] - ) - else: - result.append( - torch.cat( - [p[1].flatten(), torch.tensor([token_backward]), p[0].flatten()], - dim=0, - )[None, :] - ) - - return torch.cat(result, dim=0) - - -def frame2img(x, height, width, upscale=15): - x = x.reshape(-1, height, width) - m = torch.logical_and(x >= 0, x < first_bird_token + nb_bird_tokens).long() - x = colors[x * m].permute(0, 3, 1, 2) - s = x.shape - x = x[:, :, :, None, :, None].expand(-1, -1, -1, upscale, -1, upscale) - x = x.reshape(s[0], s[1], s[2] * upscale, s[3] * upscale) - - x[:, :, :, torch.arange(0, x.size(3), upscale)] = 0 - x[:, :, torch.arange(0, x.size(2), upscale), :] = 0 - x = x[:, :, 1:, 1:] - - for n in range(m.size(0)): - for i in range(m.size(1)): - for j in range(m.size(2)): - if m[n, i, j] == 0: - for k in range(2, upscale - 2): - x[n, :, i * upscale + k, j * upscale + k] = 0 - x[n, :, i * upscale + upscale - 1 - k, j * upscale + k] = 0 - - return x - - -def seq2img(seq, height, width, upscale=15): - f_first = seq[:, : height * width].reshape(-1, height, width) - f_second = seq[:, height * width + 1 :].reshape(-1, height, width) - direction = seq[:, height * width] - - direction_symbol = torch.full((direction.size(0), height * upscale - 1, upscale), 0) - direction_symbol = colors[direction_symbol].permute(0, 3, 1, 2) - separator = torch.full((direction.size(0), 3, height * upscale - 1, 1), 0) - - for n in range(direction_symbol.size(0)): - if direction[n] == token_forward: - for k in range(upscale): - direction_symbol[ - n, - :, - (height * upscale) // 2 - upscale // 2 + k, - 3 + upscale // 2 - abs(k - upscale // 2), - ] = 0 - elif direction[n] == token_backward: - for k in range(upscale): - direction_symbol[ - n, - :, - (height * upscale) // 2 - upscale // 2 + k, - 3 + abs(k - upscale // 2), - ] = 0 - else: - for k in range(2, upscale - 2): - direction_symbol[ - n, :, (height * upscale) // 2 - upscale // 2 + k, k - ] = 0 - direction_symbol[ - n, :, (height * upscale) // 2 - upscale // 2 + k, upscale - 1 - k - ] = 0 - - return torch.cat( - [ - frame2img(f_first, height, width, upscale), - separator, - direction_symbol, - separator, - frame2img(f_second, height, width, upscale), - ], - dim=3, - ) + return torch.cat(result, dim=0) + def frame2img(x, height, width, upscale=15): + x = x.reshape(-1, height, width) + m = torch.logical_and(x >= 0, x < first_bird_token + nb_bird_tokens).long() + x = colors[x * m].permute(0, 3, 1, 2) + s = x.shape + x = x[:, :, :, None, :, None].expand(-1, -1, -1, upscale, -1, upscale) + x = x.reshape(s[0], s[1], s[2] * upscale, s[3] * upscale) + + x[:, :, :, torch.arange(0, x.size(3), upscale)] = 0 + x[:, :, torch.arange(0, x.size(2), upscale), :] = 0 + x = x[:, :, 1:, 1:] + + for n in range(m.size(0)): + for i in range(m.size(1)): + for j in range(m.size(2)): + if m[n, i, j] == 0: + for k in range(2, upscale - 2): + x[n, :, i * upscale + k, j * upscale + k] = 0 + x[n, :, i * upscale + upscale - 1 - k, j * upscale + k] = 0 + + return x + + def seq2img(seq, height, width, upscale=15): + f_first = seq[:, : height * 
width].reshape(-1, height, width) + f_second = seq[:, height * width + 1 :].reshape(-1, height, width) + direction = seq[:, height * width] + + direction_symbol = torch.full( + (direction.size(0), height * upscale - 1, upscale), 0 + ) + direction_symbol = colors[direction_symbol].permute(0, 3, 1, 2) + separator = torch.full((direction.size(0), 3, height * upscale - 1, 1), 0) + + for n in range(direction_symbol.size(0)): + if direction[n] == token_forward: + for k in range(upscale): + direction_symbol[ + n, + :, + (height * upscale) // 2 - upscale // 2 + k, + 3 + upscale // 2 - abs(k - upscale // 2), + ] = 0 + elif direction[n] == token_backward: + for k in range(upscale): + direction_symbol[ + n, + :, + (height * upscale) // 2 - upscale // 2 + k, + 3 + abs(k - upscale // 2), + ] = 0 + else: + for k in range(2, upscale - 2): + direction_symbol[ + n, :, (height * upscale) // 2 - upscale // 2 + k, k + ] = 0 + direction_symbol[ + n, + :, + (height * upscale) // 2 - upscale // 2 + k, + upscale - 1 - k, + ] = 0 + + return torch.cat( + [ + frame2img(f_first, height, width, upscale), + separator, + direction_symbol, + separator, + frame2img(f_second, height, width, upscale), + ], + dim=3, + ) -def seq2str(seq): - result = [] - for s in seq: - result.append("".join([token2char[v] for v in s])) - return result + def seq2str(seq): + result = [] + for s in seq: + result.append("".join([token2char[v] for v in s])) + return result ######################################################################
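For readers tracing the world-generation loop moved into Sky.generate_seq above: each bird occupies the three cells (i, j), (i - vi, j) and (i, j - vj), with unit velocity components vi, vj in {-1, +1}, and the loop reflects a component whenever the head sits on a border while still moving outward, before advancing one cell. The standalone sketch below isolates that update rule; it is an illustration, not code from the repository, and the grid size in the example call is arbitrary.

def step_bird(i, j, vi, vj, height, width):
    # reflect the vertical / horizontal velocity when the head is on a border
    # and still moving outward, as the inner loop of generate_seq does
    if (i == 0 and vi == -1) or (i == height - 1 and vi == 1):
        vi = -vi
    if (j == 0 and vj == -1) or (j == width - 1 and vj == 1):
        vj = -vj
    # then advance the head by one cell
    return i + vi, j + vj, vi, vj

# a bird sitting against the right border bounces back after one step
print(step_bird(2, 7, 1, 1, height=6, width=8))  # -> (3, 6, 1, -1)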
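And a hedged end-to-end sketch of the sky helpers this commit begins folding into the Sky class, written against the pre-refactor module-level signatures visible in the removed lines (the QuizzMachine.save_image context above still calls sky.seq2img this way). The 6x8 grid, the bird count, and the assumption that the colors palette holds 0-255 RGB values are illustrative only; torchvision is used here just to write the rendered strip to disk.

import torchvision

import sky

height, width = 6, 8

# each sequence is one frame, a direction token (forward/backward), the other frame
seq = sky.generate_seq(nb=4, height=height, width=width, nb_birds=3, nb_iterations=2)

print(sky.seq2str(seq)[0])             # one character per token of the flattened pair

img = sky.seq2img(seq, height, width)  # (N, 3, height*15 - 1, W) image strip
torchvision.utils.save_image(img.float() / 255, "sky_quizzes.png", nrow=1)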