From 979cff406de06137b7b5fb1876b906b2eb45153e Mon Sep 17 00:00:00 2001
From: =?utf8?q?Fran=C3=A7ois=20Fleuret?=
Date: Thu, 27 Jun 2024 12:33:15 +0200
Subject: [PATCH] Update.

---
 main.py |   2 +-
 sky.py  | 408 +++++++++++++++++++++-----------------------------------
 2 files changed, 151 insertions(+), 259 deletions(-)

diff --git a/main.py b/main.py
index d063423..232c724 100755
--- a/main.py
+++ b/main.py
@@ -213,7 +213,7 @@ assert args.nb_train_samples % args.batch_size == 0
 assert args.nb_test_samples % args.batch_size == 0
 
 quizz_machine = quizz_machine.QuizzMachine(
-    problem=sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2),
+    problem=sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2, speed=2),
     nb_train_samples=args.nb_train_samples,
     nb_test_samples=args.nb_test_samples,
     batch_size=args.physical_batch_size,
diff --git a/sky.py b/sky.py
index ac6cbdc..fdc1689 100755
--- a/sky.py
+++ b/sky.py
@@ -44,220 +44,99 @@ class Sky(problem.Problem):
         "_" + "".join([chr(ord("A") + n) for n in range(len(colors) - 1)]) + "><"
     )
 
-    def __init__(self, height=6, width=8, nb_birds=3, nb_iterations=2):
+    def __init__(self, height=6, width=8, nb_birds=3, speed=1, nb_iterations=4):
         self.height = height
         self.width = width
         self.nb_birds = nb_birds
+        self.speed = speed
         self.nb_iterations = nb_iterations
 
     def direction_tokens(self):
         return self.token_forward, self.token_backward
 
-    def generate_seq(self, nb, return_iterations=False):
-        pairs = []
-        kept_iterations = []
+    def generate_seq(self, nb, return_frame_sequences=False):
+        frame_sequences = []
 
         for _ in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world generation"):
-            while True:
-                iterations = []
-
-                f_start = torch.zeros(self.height, self.width, dtype=torch.int64)
-
-                i, j, vi, vj = (
-                    torch.empty(self.nb_birds, dtype=torch.int64),
-                    torch.empty(self.nb_birds, dtype=torch.int64),
-                    torch.empty(self.nb_birds, dtype=torch.int64),
-                    torch.empty(self.nb_birds, dtype=torch.int64),
-                )
-
-                col = (
-                    torch.randperm(self.colors.size(0) - 1)[: self.nb_birds]
-                    .sort()
-                    .values
-                    + 1
-                )
-
+            result = torch.zeros(
+                self.nb_iterations, self.height, self.width, dtype=torch.int64
+            )
+
+            i, j, vi, vj = (
+                torch.empty(self.nb_birds, dtype=torch.int64),
+                torch.empty(self.nb_birds, dtype=torch.int64),
+                torch.empty(self.nb_birds, dtype=torch.int64),
+                torch.empty(self.nb_birds, dtype=torch.int64),
+            )
+
+            col = (
+                torch.randperm(self.colors.size(0) - 1)[: self.nb_birds].sort().values
+                + 1
+            )
+
+            for n in range(self.nb_birds):
+                while True:
+                    i[n] = torch.randint(self.height, (1,))
+                    j[n] = torch.randint(self.width, (1,))
+                    vm = torch.randint(4, (1,))
+                    vi[n], vj[n] = (vm % 2) * 2 - 1, (vm // 2) * 2 - 1
+                    if (
+                        i[n] - vi[n] >= 0
+                        and i[n] - vi[n] < self.height
+                        and j[n] - vj[n] >= 0
+                        and j[n] - vj[n] < self.width
+                    ):
+                        break
+
+            for l in range(self.nb_iterations):
                 for n in range(self.nb_birds):
                     c = col[n]
+                    result[l, i[n], j[n]] = c
+                    result[l, i[n] - vi[n], j[n]] = c
+                    result[l, i[n], j[n] - vj[n]] = c
 
-                    while True:
-                        i[n], j[n] = (
-                            torch.randint(self.height, (1,))[0],
-                            torch.randint(self.width, (1,))[0],
-                        )
-                        vm = torch.randint(4, (1,))[0]
-                        vi[n], vj[n] = (vm % 2) * 2 - 1, (vm // 2) * 2 - 1
-                        if (
-                            i[n] - vi[n] >= 0
-                            and i[n] - vi[n] < self.height
-                            and j[n] - vj[n] >= 0
-                            and j[n] - vj[n] < self.width
-                            and f_start[i[n], j[n]] == 0
-                            and f_start[i[n] - vi[n], j[n]] == 0
-                            and f_start[i[n], j[n] - vj[n]] == 0
-                        ):
-                            break
-
-                    f_start[i[n], j[n]] = c
-                    f_start[i[n] - vi[n], j[n]] = c
-                    f_start[i[n], j[n] - vj[n]] = c
-
-                f_end = f_start.clone()
-
-                for l in range(self.nb_iterations):
-                    iterations.append(f_end.clone())
-                    f_end[...] = 0
-                    nb_collisions = 0
-                    for n in range(self.nb_birds):
-                        c = col[n]
-
-                        pi, pj, pvi, pvj = (
-                            i[n].item(),
-                            j[n].item(),
-                            vi[n].item(),
-                            vj[n].item(),
-                        )
-
-                        if (i[n] == 0 and vi[n] == -1) or (
-                            i[n] == self.height - 1 and vi[n] == 1
-                        ):
-                            vi[n] = -vi[n]
-                        if (j[n] == 0 and vj[n] == -1) or (
-                            j[n] == self.width - 1 and vj[n] == 1
-                        ):
-                            vj[n] = -vj[n]
-
-                        i[n] += vi[n]
-                        j[n] += vj[n]
-
-                        if not (
-                            f_end[i[n], j[n]] == 0
-                            and f_end[i[n] - vi[n], j[n]] == 0
-                            and f_end[i[n], j[n] - vj[n]] == 0
-                        ):
-                            nb_collisions += 1
-
-                        f_end[i[n], j[n]] = c
-                        f_end[i[n] - vi[n], j[n]] = c
-                        f_end[i[n], j[n] - vj[n]] = c
-
-                iterations.append(f_end.clone())
-
-                if nb_collisions == 0:
-                    break
-
-            kept_iterations.append(iterations)
-            pairs.append((f_start, f_end))
-
-        result = []
-        for p in pairs:
-            if torch.rand(1) < 0.5:
-                result.append(
-                    torch.cat(
-                        [
-                            p[0].flatten(),
-                            torch.tensor([self.token_forward]),
-                            p[1].flatten(),
-                        ],
-                        dim=0,
-                    )[None, :]
-                )
-            else:
-                result.append(
-                    torch.cat(
-                        [
-                            p[1].flatten(),
-                            torch.tensor([self.token_backward]),
-                            p[0].flatten(),
-                        ],
-                        dim=0,
-                    )[None, :]
-                )
-
-        if return_iterations:
-            # iterations = torch.cat([ torch.cat([ x[None, None] for x in l], dim = 1) for l in kept_iterations ], dim=0)
-            return torch.cat(result, dim=0), kept_iterations
-        else:
-            return torch.cat(result, dim=0)
-
-    ######################################################################
+                    if (i[n] == 0 and vi[n] == -1) or (
+                        i[n] == self.height - 1 and vi[n] == 1
+                    ):
+                        vi[n] = -vi[n]
 
-    def generate_seq_old(
-        self,
-        nb,
-    ):
-        pairs = []
-
-        for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world generation"):
-            f_start = torch.zeros(self.height, self.width, dtype=torch.int64)
-            f_end = torch.zeros(self.height, self.width, dtype=torch.int64)
-            n = torch.arange(f_start.size(0))
-
-            for c in (
-                (torch.randperm(self.nb_bird_tokens) + self.first_bird_token)[
-                    : self.nb_birds
-                ]
-                .sort()
-                .values
-            ):
-                i, j = (
-                    torch.randint(self.height - 2, (1,))[0] + 1,
-                    torch.randint(self.width - 2, (1,))[0] + 1,
-                )
-                vm = torch.randint(4, (1,))[0]
-                vi, vj = (vm // 2) * (2 * (vm % 2) - 1), (1 - vm // 2) * (
-                    2 * (vm % 2) - 1
-                )
+                    if (j[n] == 0 and vj[n] == -1) or (
+                        j[n] == self.width - 1 and vj[n] == 1
+                    ):
+                        vj[n] = -vj[n]
 
-                f_start[i, j] = c
-                f_start[i - vi, j - vj] = c
-                f_start[i + vj, j - vi] = c
-                f_start[i - vj, j + vi] = c
+                    i[n] += vi[n]
+                    j[n] += vj[n]
 
-                for l in range(self.nb_iterations):
-                    i += vi
-                    j += vj
-                    if i < 0 or i >= self.height or j < 0 or j >= self.width:
-                        i -= vi
-                        j -= vj
-                        vi, vj = -vi, -vj
-                        i += vi
-                        j += vj
+            frame_sequences.append(result)
 
-                f_end[i, j] = c
-                f_end[i - vi, j - vj] = c
-                f_end[i + vj, j - vi] = c
-                f_end[i - vj, j + vi] = c
+        if return_frame_sequences:
+            return frame_sequences
 
-            pairs.append((f_start, f_end))
+        # Randomize the time direction, and convert to token
+        # sequences with the time direction tokens added
 
         result = []
-        for p in pairs:
+
+        for frame_sequence in frame_sequences:
+            a = []
             if torch.rand(1) < 0.5:
-                result.append(
-                    torch.cat(
-                        [
-                            p[0].flatten(),
-                            torch.tensor([self.token_forward]),
-                            p[1].flatten(),
-                        ],
-                        dim=0,
-                    )[None, :]
-                )
+                for frame in frame_sequence:
+                    if len(a) > 0:
+                        a.append(torch.tensor([self.token_forward]))
+                    a.append(frame.flatten())
             else:
-                result.append(
-                    torch.cat(
-                        [
-                            p[1].flatten(),
-                            torch.tensor([self.token_backward]),
-                            p[0].flatten(),
-                        ],
-                        dim=0,
-                    )[None, :]
-                )
+                for frame in reversed(frame_sequence):
+                    if len(a) > 0:
+                        a.append(torch.tensor([self.token_backward]))
+                    a.append(frame.flatten())
+
+            result.append(torch.cat(a, dim=0)[None, :])
 
         return torch.cat(result, dim=0)
 
+    ######################################################################
+
     def frame2img(self, x, scale=15):
         x = x.reshape(-1, self.height, self.width)
         m = torch.logical_and(
@@ -286,65 +165,77 @@ class Sky(problem.Problem):
         return x
 
     def seq2img(self, seq, scale=15):
-        f_first = seq[:, : self.height * self.width].reshape(
-            -1, self.height, self.width
-        )
-        f_second = seq[:, self.height * self.width + 1 :].reshape(
-            -1, self.height, self.width
-        )
-        direction = seq[:, self.height * self.width]
-
-        direction_symbol = torch.full(
-            (direction.size(0), self.height * scale - 1, scale), 0
-        )
-        direction_symbol = self.colors[direction_symbol].permute(0, 3, 1, 2)
-        separator = torch.full((direction.size(0), 3, self.height * scale - 1, 1), 0)
-
-        for n in range(direction_symbol.size(0)):
-            if direction[n] == self.token_forward:
-                for k in range(scale):
-                    for l in [0, 1]:
-                        direction_symbol[
-                            n,
-                            :,
-                            (self.height * scale) // 2 - scale // 2 + k - l,
-                            3 + scale // 2 - abs(k - scale // 2),
-                        ] = 0
-            elif direction[n] == self.token_backward:
-                for k in range(scale):
-                    for l in [0, 1]:
-                        direction_symbol[
-                            n,
-                            :,
-                            (self.height * scale) // 2 - scale // 2 + k - l,
-                            3 + abs(k - scale // 2),
-                        ] = 0
-            else:
-                for k in range(2, scale - 2):
-                    for l in [0, 1]:
-                        direction_symbol[
-                            n,
-                            :,
-                            (self.height * scale) // 2 - scale // 2 + k - l,
-                            k,
-                        ] = 0
-                        direction_symbol[
-                            n,
-                            :,
-                            (self.height * scale) // 2 - scale // 2 + k - l,
-                            scale - 1 - k,
-                        ] = 0
-
-        return torch.cat(
-            [
-                self.frame2img(f_first, scale),
+        all = [
+            self.frame2img(
+                seq[:, : self.height * self.width].reshape(-1, self.height, self.width),
+                scale,
+            )
+        ]
+
+        separator = torch.full((seq.size(0), 3, self.height * scale - 1, 1), 0)
+
+        t = self.height * self.width
+
+        while t < seq.size(1):
+            direction_tokens = seq[:, t]
+            t += 1
+
+            direction_images = self.colors[
+                torch.full(
+                    (direction_tokens.size(0), self.height * scale - 1, scale), 0
+                )
+            ].permute(0, 3, 1, 2)
+
+            for n in range(direction_tokens.size(0)):
+                if direction_tokens[n] == self.token_forward:
+                    for k in range(scale):
+                        for l in [0, 1]:
+                            direction_images[
+                                n,
+                                :,
+                                (self.height * scale) // 2 - scale // 2 + k - l,
+                                3 + scale // 2 - abs(k - scale // 2),
+                            ] = 0
+                elif direction_tokens[n] == self.token_backward:
+                    for k in range(scale):
+                        for l in [0, 1]:
+                            direction_images[
+                                n,
+                                :,
+                                (self.height * scale) // 2 - scale // 2 + k - l,
+                                3 + abs(k - scale // 2),
+                            ] = 0
+                else:
+                    for k in range(2, scale - 2):
+                        for l in [0, 1]:
+                            direction_images[
+                                n,
+                                :,
+                                (self.height * scale) // 2 - scale // 2 + k - l,
+                                k,
+                            ] = 0
+                            direction_images[
+                                n,
+                                :,
+                                (self.height * scale) // 2 - scale // 2 + k - l,
+                                scale - 1 - k,
+                            ] = 0
+
+            all += [
                 separator,
-                direction_symbol,
+                direction_images,
                 separator,
-                self.frame2img(f_second, scale),
-            ],
-            dim=3,
-        )
+                self.frame2img(
+                    seq[:, t : t + self.height * self.width].reshape(
+                        -1, self.height, self.width
+                    ),
+                    scale,
+                ),
+            ]
+
+            t += self.height * self.width
+
+        return torch.cat(all, dim=3)
 
     def seq2str(self, seq):
         result = []
@@ -366,28 +257,29 @@ class Sky(problem.Problem):
 if __name__ == "__main__":
     import time
 
-    sky = Sky(height=6, width=8, nb_iterations=100)
+    sky = Sky(height=6, width=8, speed=1, nb_iterations=4)
 
     start_time = time.perf_counter()
-    seq, it = sky.generate_seq(nb=64, return_iterations=True)
+    seq = sky.generate_seq(nb=64)
     delay = time.perf_counter() - start_time
     print(f"{seq.size(0)/delay:02f} seq/s")
 
-    print(sky.seq2str(seq[:4]))
+    # print(sky.seq2str(seq[:4]))
 
-    for t in range(len(it[0])):
-        img = torch.cat([sky.frame2img(f[t]) for f in it], dim=0)
-        torchvision.utils.save_image(
-            img.float() / 255.0,
-            f"/tmp/frame_{t:03d}.png",
-            nrow=8,
-            padding=6,
-            pad_value=0,
-        )
+    # for t in range(len(it[0])):
+    # img = torch.cat([sky.frame2img(f[t]) for f in it], dim=0)
+    # torchvision.utils.save_image(
+    # img.float() / 255.0,
+    # f"/tmp/frame_{t:03d}.png",
+    # nrow=8,
+    # padding=6,
+    # pad_value=0,
+    # )
 
     # m = (torch.rand(seq.size()) < 0.05).long()
     # seq = (1 - m) * seq + m * 23
 
+    print(seq.size())
     img = sky.seq2img(seq)
     print(img.size())
 
-- 
2.39.5
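
A minimal usage sketch of the Sky API as it stands after this patch, mirroring the constructor call from main.py and the __main__ block above. It assumes sky.py and torchvision are importable from the working directory; the sample count and the output path /tmp/sky_seq.png are arbitrary choices for illustration.

    import torchvision

    import sky

    # Constructor arguments as passed in main.py after this patch; speed is new
    # and is stored by __init__ as self.speed.
    problem = sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2, speed=2)

    # generate_seq now returns a single tensor of token sequences, one row per
    # sample, with successive frames separated by a direction token.
    seq = problem.generate_seq(nb=16)
    print(seq.size())

    # seq2img renders all frames of each sequence side by side, drawing the
    # forward/backward glyph between consecutive frames.
    img = problem.seq2img(seq)

    torchvision.utils.save_image(
        img.float() / 255.0, "/tmp/sky_seq.png", nrow=4, padding=6, pad_value=0
    )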