- f_start[i[n], j[n]] = c
- f_start[i[n] - vi[n], j[n]] = c
- f_start[i[n], j[n] - vj[n]] = c
-
- f_end = f_start.clone()
-
- for l in range(nb_iterations):
- iterations.append(f_end.clone())
- f_end[...] = 0
- nb_collisions = 0
- for n in range(nb_birds):
- c = col[n]
-
- pi, pj, pvi, pvj = (
- i[n].item(),
- j[n].item(),
- vi[n].item(),
- vj[n].item(),
- )
-
- if (i[n] == 0 and vi[n] == -1) or (
- i[n] == height - 1 and vi[n] == 1
- ):
- vi[n] = -vi[n]
- if (j[n] == 0 and vj[n] == -1) or (
- j[n] == width - 1 and vj[n] == 1
- ):
- vj[n] = -vj[n]
-
- i[n] += vi[n]
- j[n] += vj[n]
-
- if not (
- f_end[i[n], j[n]] == 0
- and f_end[i[n] - vi[n], j[n]] == 0
- and f_end[i[n], j[n] - vj[n]] == 0
- ):
- nb_collisions += 1
-
- f_end[i[n], j[n]] = c
- f_end[i[n] - vi[n], j[n]] = c
- f_end[i[n], j[n] - vj[n]] = c
-
- iterations.append(f_end.clone())
-
- if nb_collisions == 0:
- break
-
- kept_iterations.append(iterations)
- pairs.append((f_start, f_end))
-
- result = []
- for p in pairs:
- if torch.rand(1) < 0.5:
- result.append(
- torch.cat(
- [p[0].flatten(), torch.tensor([token_forward]), p[1].flatten()],
- dim=0,
- )[None, :]
- )
- else:
- result.append(
- torch.cat(
- [p[1].flatten(), torch.tensor([token_backward]), p[0].flatten()],
- dim=0,
- )[None, :]
- )
-
- if return_iterations:
- # iterations = torch.cat([ torch.cat([ x[None, None] for x in l], dim = 1) for l in kept_iterations ], dim=0)
- return torch.cat(result, dim=0), kept_iterations
- else:
- return torch.cat(result, dim=0)
+ result = torch.zeros(
+ self.nb_iterations * self.speed,
+ self.height,
+ self.width,
+ dtype=torch.int64,
+ )
+
+ fine = torch.empty(self.nb_iterations * self.speed)
+
+ t_to_keep = (
+ torch.arange(self.nb_iterations, device=result.device) * self.speed
+ )
+
+ for l in range(self.nb_iterations * self.speed):
+ fine[l] = collision_okay()
+ for n in range(self.nb_birds):
+ c = col[n]
+ result[l, i[n], j[n]] = c
+ result[l, i[n] - vi[n], j[n]] = c
+ result[l, i[n], j[n] - vj[n]] = c
+
+ if (i[n] == 0 and vi[n] == -1) or (
+ i[n] == self.height - 1 and vi[n] == 1
+ ):
+ vi[n] = -vi[n]
+
+ if (j[n] == 0 and vj[n] == -1) or (
+ j[n] == self.width - 1 and vj[n] == 1
+ ):
+ vj[n] = -vj[n]
+
+ i[n] += vi[n]
+ j[n] += vj[n]
+
+ result = result[t_to_keep]
+ fine = fine[t_to_keep]
+
+ if fine[-1]:
+ break
+
+ frame_sequences.append(result)
+
+ return frame_sequences
+
+ ######################################################################
+
def frame2img(self, x, scale=15):
    """Render a batch of flattened token grids as RGB images.

    x: integer tensor of shape (N, height*width) — one token per grid cell
       (width is inferred from the reshape below).
    scale: side length, in pixels, of the square block drawn for each cell.
    Returns an image tensor of shape (N, C, height*scale - 1, W*scale - 1).
    """
    x = x.reshape(x.size(0), self.height, -1)
    # Mask of in-range cells: token in [0, first_bird_token + nb_bird_tokens).
    m = torch.logical_and(
        x >= 0, x < self.first_bird_token + self.nb_bird_tokens
    ).long()
    # Out-of-range tokens are zeroed (x * m) before the palette lookup.
    # NOTE(review): assumes self.colors is a (nb_tokens, channels) palette
    # tensor, presumably channels == 3 — confirm against its definition.
    x = self.colors[x * m].permute(0, 3, 1, 2)
    s = x.shape
    # Upsample: turn every cell into a scale x scale pixel block.
    x = x[:, :, :, None, :, None].expand(-1, -1, -1, scale, -1, scale)
    x = x.reshape(s[0], s[1], s[2] * scale, s[3] * scale)

    # Draw black grid lines every `scale` pixels along both axes, then drop
    # the first pixel row/column so no line hugs the top/left edge.
    x[:, :, :, torch.arange(0, x.size(3), scale)] = 0
    x[:, :, torch.arange(0, x.size(2), scale), :] = 0
    x = x[:, :, 1:, 1:]

    # Overlay a two-pixel-wide black X on every masked-out (invalid) cell,
    # leaving a 2-pixel border inside the cell untouched.
    for n in range(m.size(0)):
        for i in range(m.size(1)):
            for j in range(m.size(2)):
                if m[n, i, j] == 0:
                    for k in range(2, scale - 2):
                        for l in [0, 1]:
                            x[n, :, i * scale + k, j * scale + k - l] = 0
                            x[
                                n, :, i * scale + scale - 1 - k, j * scale + k - l
                            ] = 0

    return x
+
def seq2str(self, seq):
    """Map each token sequence in seq to a string via self.token2char."""
    return ["".join(self.token2char[t] for t in s) for s in seq]
+
+ def save_image(
+ self,
+ result_dir,
+ filename,
+ prompts,
+ answers,
+ predicted_prompts=None,
+ predicted_answers=None,
+ ):
+ if predicted_prompts is None:
+ predicted_prompts = 255
+
+ if predicted_answers is None:
+ predicted_answers = 255
+
def add_frame(x, c, margin, bottom=False):
    """Paste image batch x onto a larger canvas filled with color c.

    c is either a plain int (uniform fill) or a per-sample tensor whose
    values -1 / 0 / 1 are mapped to red / white / green. With bottom=True
    the extra margin is appended only below x; otherwise it surrounds x.
    """
    if bottom:
        h, w, di, dj = x.size(2) + margin, x.size(3), 0, 0
    else:
        h = x.size(2) + 2 * margin
        w = x.size(3) + 2 * margin
        di, dj = margin, margin

    y = x.new_full((x.size(0), x.size(1), h, w), 0)

    if type(c) is int:
        # Uniform scalar fill.
        y[...] = c
    else:
        # Per-sample status color, broadcast over the spatial dimensions.
        cc = c.long()[:, None]
        mapped = sum(
            (cc == v).long() * torch.tensor(rgb, device=cc.device)
            for v, rgb in ((1, [0, 255, 0]), (0, [255, 255, 255]), (-1, [255, 0, 0]))
        )
        y[...] = mapped[:, :, None, None]

    # Overwrite the interior with the original image.
    y[:, :, di : di + x.size(2), dj : dj + x.size(3)] = x

    return y
+
+ margin = 4
+
+ img_prompts = add_frame(self.frame2img(prompts.to("cpu")), c=0, margin=1)
+ h = img_prompts.size(2)
+ img_answers = add_frame(self.frame2img(answers.to("cpu")), c=0, margin=1)
+
+ img_prompts = add_frame(img_prompts, c=255, margin=margin, bottom=True)
+ img_answers = add_frame(img_answers, c=255, margin=margin, bottom=True)
+
+ img_prompts = add_frame(
+ img_prompts, c=predicted_prompts, margin=margin, bottom=True
+ )
+ img_answers = add_frame(
+ img_answers, c=predicted_answers, margin=margin, bottom=True
+ )