X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=problems.py;h=b8fcdb34b32a07e8fd46900c88d889c3cf9761ea;hb=76e62a5782fc2509ce989fcfc0d0aedc17322b3a;hp=819715e1b5b1bab6af7207f8656f6aaefb8408f0;hpb=cb7001fcd7a75eaeaca9ae66fce37e372acf8cc1;p=picoclvr.git

diff --git a/problems.py b/problems.py
index 819715e..b8fcdb3 100755
--- a/problems.py
+++ b/problems.py
@@ -22,37 +22,49 @@ class Problem:
         nb_correct = ((result == input).long() * ar_mask).sum().item()
         return nb_total, nb_correct
 
-####################
-
+####################
 
 
 class ProblemDegradation(Problem):
-    def __init__(self, nb_state_tokens=5, nb_time_steps=5, value_max=25, hard=False):
+    def __init__(self, nb_state_tokens=5, nb_time_steps=12, value_max=25, hard=False):
+        assert value_max // nb_state_tokens >= 2
         self.nb_state_tokens = nb_state_tokens
         self.nb_time_steps = nb_time_steps
         self.value_max = value_max
         self.hard = hard
 
-    def generate_sequences(self,nb):
-
-        x = (torch.rand(nb,self.nb_state_tokens).sort(dim=-1).indices == 0).long() * self.value_max
+    def generate_sequences(self, nb):
+        x = (
+            torch.rand(nb, self.nb_state_tokens).sort(dim=-1).indices == 0
+        ).long() * self.value_max
         seq = [x]
 
-        for t in range(self.nb_time_steps-1):
-            v = torch.rand(x.size()) * (x > 0).float()
-            u = (v.max(dim=-1,keepdim=True).values == v).long()
-            n = (u*x*torch.rand(x.size())).long().sum(dim=-1,keepdim=True) // 2
-            x = x + n * (u.roll(shifts=-1,dims=-1) - 2 * u + u.roll(shifts=1,dims=-1))
+        for t in range(self.nb_time_steps - 1):
+            v = (torch.rand(x.size()).sort(dim=-1).indices + 1) * (x >= 2).long()
+            u = (v.max(dim=-1, keepdim=True).values == v).long()
+            n = (
+                (u * x)
+                .minimum(2 + torch.randint(self.value_max // 4 - 2, x.size()))
+                .sum(dim=-1, keepdim=True)
+            )
+            m = 1 + ((n - 1) * torch.rand(n.size())).long()
+            x = (
+                x
+                + m * u.roll(shifts=-1, dims=-1)
+                - n * u
+                + (n - m) * u.roll(shifts=1, dims=-1)
+            )
             seq.append(x)
 
-        if self.hard: seq.reverse()
+        if self.hard:
+            seq.reverse()
 
-        seq = torch.cat(seq,dim=1)
-        return seq,seq.new_full(seq.size(), 1, dtype=torch.int64)
+        seq = torch.cat(seq, dim=1)
+        return seq, seq.new_full(seq.size(), 1, dtype=torch.int64)
 
     def compute_nb_correct(self, input, ar_mask, result):
         nb_total = result.size(0)
         nb_correct = 0
-        e=result.new_zeros(self.nb_state_tokens)
+        e = result.new_zeros(self.nb_state_tokens)
 
         for seq in result:
             states = list(seq.split(self.nb_state_tokens))
@@ -60,27 +72,38 @@ class ProblemDegradation(Problem):
                 states.reverse()
 
             d = states[0]
-            j=d.sort(descending=True).indices[0]
+            j = d.sort(descending=True).indices[0]
             e.zero_()
-            e[j]=self.value_max
-            if (d-e).abs().sum() == 0:
+            e[j] = self.value_max
+            if (d - e).abs().sum() == 0:
                 nb_errors = 0
-                for k in range(len(states)-1):
-                    d=states[k]-states[k+1]
-                    j=d.sort(descending=True).indices[0]
-                    e.zero_()
-                    e[j]=d[j]
-                    e[(j+1)%e.size(0)]=-d[j]//2
-                    e[(j-1)%e.size(0)]=-d[j]//2
-                    if (d-e).abs().sum() > 0:
+                for k in range(len(states) - 1):
+                    d = states[k + 1] - states[k]
+                    j = d.sort(descending=False).indices[0]
+                    if (
+                        d[j] == 0
+                        or d[j] > self.value_max // 4
+                        or d[(j + 1) % e.size(0)] <= 0
+                        or d[(j + 1) % e.size(0)] >= -d[j]
+                    ):
                         nb_errors += 1
+                    else:
+                        e.zero_()
+                        e[j] = d[j]
+                        e[(j + 1) % e.size(0)] = d[(j + 1) % e.size(0)]
+                        e[(j - 1) % e.size(0)] = -d[(j + 1) % e.size(0)] - d[j]
+                        if (d - e).abs().sum() > 0:
+                            nb_errors += 1
                 if nb_errors == 0:
                     nb_correct += 1
 
         return nb_total, nb_correct
 
     def seq2str(self, seq):
-        return " | ".join( [ " ".join([f"{x:02d}" for x in s ]) for s in seq.split(self.nb_state_tokens) ] )
+        return " | ".join(
+            [" ".join([f"{x:02d}" for x in s]) for s in seq.split(self.nb_state_tokens)]
+        )
+
 
 ####################
 
@@ -260,8 +283,125 @@ class ProblemAddition(Problem):
         return "".join(self.id2char[x.item()] for x in seq)
 
 
+####################
+
+
+class ProblemMixing(Problem):
+    def __init__(self, height=4, width=4, nb_time_steps=9, hard=False):
+        self.height = height
+        self.width = width
+        self.nb_time_steps = nb_time_steps
+        self.hard = hard
+
+    def start_random(self, nb):
+        y = torch.arange(self.height * self.width).reshape(1, -1).expand(nb, -1)
+
+        # m = (torch.rand(y.size()).sort(dim=-1).indices < y.size(1) // 2).long()
+
+        i = torch.arange(self.height).reshape(1,-1,1).expand(nb,self.height,self.width)
+        j = torch.arange(self.width).reshape(1,1,-1).expand(nb,self.height,self.width)
+
+        ri = torch.randint(self.height, (nb,)).reshape(nb,1,1)
+        rj = torch.randint(self.width, (nb,)).reshape(nb,1,1)
+
+        m = 1 - torch.logical_or(i==ri,j==rj).long().flatten(1)
+
+        y = (y * m + self.height * self.width * (1 - m)).reshape(
+            nb, self.height, self.width
+        )
+
+        return y
+
+    def start_error(self, x):
+        i = torch.arange(self.height, device=x.device).reshape(1,-1,1).expand_as(x)
+        j = torch.arange(self.width, device=x.device).reshape(1,1,-1).expand_as(x)
+
+        ri = (x == self.height * self.width).long().sum(dim=-1).argmax(-1).view(-1,1,1)
+        rj = (x == self.height * self.width).long().sum(dim=-2).argmax(-1).view(-1,1,1)
+
+        m = 1 - torch.logical_or(i==ri,j==rj).long().flatten(1)
+
+        x = x.flatten(1)
+        u = torch.arange(self.height * self.width, device = x.device).reshape(1, -1)
+
+        d = (x - (m * u + (1 - m) * self.height * self.width)).abs().sum(-1)
+        return d
+
+    def moves(self, x):
+        y = (
+            x[:, None, :, :]
+            .expand(-1, self.height * 2 + self.width * 2, -1, -1)
+            .clone()
+        )
+        k = 0
+
+        for i in range(self.height):
+            y[:, k, i, :] = y[:, k, i, :].roll(dims=-1, shifts=-1)
+            k += 1
+            y[:, k, i, :] = y[:, k, i, :].roll(dims=-1, shifts=1)
+            k += 1
+
+        for j in range(self.width):
+            y[:, k, :, j] = y[:, k, :, j].roll(dims=-1, shifts=-1)
+            k += 1
+            y[:, k, :, j] = y[:, k, :, j].roll(dims=-1, shifts=1)
+            k += 1
+
+        return y
+
+    def generate_sequences(self, nb):
+        x = self.start_random(nb)
+
+        seq = [x.flatten(1)]
+
+        for t in range(self.nb_time_steps - 1):
+            y = self.moves(x)
+            x = y[torch.arange(nb), torch.randint(y.size(1), (nb,))]
+            seq.append(x.flatten(1))
+
+        if self.hard:
+            seq.reverse()
+
+        seq = torch.cat(seq, dim=1)
+        return seq, seq.new_full(seq.size(), 1, dtype=torch.int64)
+
+    def compute_nb_correct(self, input, ar_mask, result):
+        a = [
+            x.reshape(result.size(0), self.height, self.width)
+            for x in result.split(self.height * self.width, dim=1)
+        ]
+        if self.hard:
+            a.reverse()
+
+        x = a[0]
+
+        d = self.start_error(x)
+
+        for t in range(self.nb_time_steps - 1):
+            x0, x = a[t], a[t + 1]
+            y = self.moves(x0)
+            d = d + (x[:, None] - y).abs().sum((-1, -2)).min(dim=-1).values
+
+        nb_total, nb_correct = result.size(0), (d == 0).long().sum().item()
+
+        return nb_total, nb_correct
+
+    def seq2str(self, seq):
+        return " | ".join(
+            [
+                " ".join(
+                    ["-".join([f"{x:02d}" if x < self.height * self.width else "**" for x in s]) for s in r.split(self.width)]
+                )
+                for r in seq.split(self.height * self.width)
+            ]
+        )
+
+
+####################
+
 if __name__ == "__main__":
-    p = ProblemDegradation(hard=False)
+    p = ProblemMixing()
     s, m = p.generate_sequences(10000)
-    print(p.seq2str(s[0]))
+    for x in s[:5]:
+        print(p.seq2str(x))
     print(p.compute_nb_correct(None, None, s))
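
Note (editorial, not part of the commit): a minimal sketch of one way to sanity-check the new degradation rule above, assuming the problems.py shown in this diff is importable from the repository root. Each step moves m units to one neighbour of the selected token and n - m to the other while removing n from it, so the per-step delta sums to zero and every time step should keep a total of value_max across the state tokens.

    # Sketch only; assumes problems.py from this repository is on the import path.
    import torch
    from problems import ProblemDegradation

    p = ProblemDegradation(hard=False)
    s, _ = p.generate_sequences(100)
    # One chunk of nb_state_tokens values per time step.
    states = s.split(p.nb_state_tokens, dim=1)
    totals = torch.stack([st.sum(dim=-1) for st in states], dim=1)
    assert (totals == p.value_max).all(), "total value should be conserved at each step"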