# Choose the best available accelerator, preferring CUDA, then Apple
# Silicon's MPS backend, and falling back to the CPU.
if torch.cuda.is_available():
    backend = "cuda"
elif torch.backends.mps.is_available():
    backend = "mps"
else:
    backend = "cpu"
device = torch.device(backend)

# grid = FormalGrid(grid_height=7, grid_width=7, nb_symbols=4, device=device)
# grid_set = grid.new_grid_set(["4 is_equidistant_from 2 and 3", "2 4 is_parallel_to_diagonal"])
# print(next(iter(grid.views(grid_set))))
# exit(0)
+
def proof_depth(steps, c):
    """Return the derivation depth of constraint `c`.

    `steps` maps a derived constraint to the pair of constraints it was
    deduced from. A given (underived) constraint has depth 0; a derived
    one has depth one more than its deepest premise.
    """
    a = steps.get(c)
    if a is None:
        return 0
    c1, c2 = a
    # BUG FIX: each derivation step must add one level. Without the
    # `1 +`, induction makes every constraint's depth 0, so the
    # `max(depth) >= 3` termination check in generate_proof never fires.
    return 1 + max(proof_depth(steps, c1), proof_depth(steps, c2))
+
def generate_proof(grid):
    """Randomly grow a small proof over `grid`.

    First samples batches of ten random properties until a satisfiable
    batch is found, then repeatedly tries to derive a new property that
    follows from a random pair of known ones, stopping early once some
    derived constraint reaches depth >= 3.

    Returns (initial, steps) where `initial` is the list of given
    constraints and `steps` maps each derived constraint to the pair of
    premises it was deduced from.
    """
    # Keep sampling ten random properties until the batch is satisfiable.
    while True:
        constraints = [grid.random_property() for _ in range(10)]
        grid_set = grid.new_grid_set(constraints)
        if grid_set.any():
            break

    mg = grid.master_grid_set

    print(constraints)

    initial = constraints.copy()

    steps = {}  # derived constraint -> (premise_a, premise_b)

    for _ in range(1000):
        premise_a, premise_b = random.sample(constraints, 2)
        fun_a = grid.constraint_to_fun(premise_a)
        fun_b = grid.constraint_to_fun(premise_b)

        for _ in range(100):
            candidate = grid.random_property()
            if candidate in constraints:
                continue
            fun_c = grid.constraint_to_fun(candidate)
            # Accept the candidate only if it does not follow from
            # either premise alone, is consistent with both premises
            # together, and is entailed by their conjunction.
            not_from_a = (mg & fun_a & ~fun_c).any()
            not_from_b = (mg & fun_b & ~fun_c).any()
            conjunction = mg & fun_a & fun_b
            if (
                not_from_a
                and not_from_b
                and (conjunction & fun_c).any()
                and not (conjunction & ~fun_c).any()
            ):
                constraints.append(candidate)
                print(premise_a, "and", premise_b, "=>", candidate)
                steps[candidate] = (premise_a, premise_b)
                break

        # Stop once some derived constraint has a deep enough proof.
        if steps and max(proof_depth(steps, c) for c in steps) >= 3:
            break

    return initial, steps
+
# Build a 7x7 grid world with four symbols on the selected device.
grid = FormalGrid(grid_height=7, grid_width=7, nb_symbols=4, device=device)

# `initial` is the list of given constraints; `steps` maps each derived
# constraint to the pair of premises it was deduced from.
initial, steps = generate_proof(grid)

# Print the given constraints on a single line.
print(" ; ".join(initial))
+
+ def proof(c, indent=""):
+ a = steps.get(c)
+ if a is None:
+ print(f"{indent}{c} is given")
+ else:
+ print(f"{indent}{c} since")
+ c1, c2 = a
+ proof(c1, indent + " ")
+ proof(c2, indent + " ")
+
# Echo the given constraints, then dump one derivation tree per derived
# constraint, separated by blank lines.
print(" ; ".join(initial))

for conclusion in steps:
    proof(conclusion)
    print()

exit(0)