# elipl/new.exs
defmodule Tdd.Debug do
@moduledoc """
Provides macros to wrap `def` and `defp` for simple function call/return tracing.
Logs arguments as a list and return values using `IO.inspect`.
"""
# --- Agent for Tracing State ---
@agent_name Tdd.Debug.StateAgent
def init_agent_if_needed do
case Process.whereis(@agent_name) do
nil -> Agent.start_link(fn -> MapSet.new() end, name: @agent_name)
_pid -> :ok
end
:ok
end
def add_traced_pid(pid) when is_pid(pid) do
init_agent_if_needed()
Agent.update(@agent_name, &MapSet.put(&1, pid))
end
def remove_traced_pid(pid) when is_pid(pid) do
case Process.whereis(@agent_name) do
nil -> :ok
agent_pid -> Agent.cast(agent_pid, fn state -> MapSet.delete(state, pid) end)
end
end
def is_pid_traced?(pid) when is_pid(pid) do
case Process.whereis(@agent_name) do
nil ->
false
agent_pid ->
try do
Agent.get(agent_pid, &MapSet.member?(&1, pid), :infinity)
rescue
ArgumentError -> false
catch
# A dead or dying agent makes `Agent.get` exit rather than raise, so
# catch the exit and treat the PID as not traced.
:exit, _ -> false
end
end
end
# --- Tracing Control Functions ---
@doc "Enables function call tracing for the current process."
def enable_tracing do
pid_to_trace = self()
add_traced_pid(pid_to_trace)
# The monitor must be created inside the watcher process; otherwise the
# :DOWN message would be delivered to the traced process itself and the
# watcher would never see it.
spawn(fn ->
ref = Process.monitor(pid_to_trace)
receive do
{:DOWN, ^ref, :process, ^pid_to_trace, _reason} ->
remove_traced_pid(pid_to_trace)
after
3_600_000 -> # 1 hour safety timeout
remove_traced_pid(pid_to_trace)
end
end)
:ok
end
@doc "Disables function call tracing for the current process."
def disable_tracing do
remove_traced_pid(self())
:ok
end
@doc "Runs the given 0-arity function with tracing enabled, then disables it."
def run(fun) when is_function(fun, 0) do
enable_tracing()
try do
fun.()
after
disable_tracing()
end
end
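# Usage sketch (assumes a hypothetical module `MyTracedMod` that pulls in the
# overridden `def` via `use Tdd.Debug`):
#
#     defmodule MyTracedMod do
#       use Tdd.Debug
#       def add(a, b), do: a + b
#     end
#
#     Tdd.Debug.run(fn -> MyTracedMod.add(1, 2) end)
#     # prints indented CALL / ARGS / RETURN lines for calls made in this process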
# --- Process Dictionary for Call Depth ---
defp get_depth, do: Process.get(:tdd_debug_depth, 0)
def increment_depth do
new_depth = get_depth() + 1
Process.put(:tdd_debug_depth, new_depth)
new_depth
end
def decrement_depth do
new_depth = max(0, get_depth() - 1)
Process.put(:tdd_debug_depth, new_depth)
new_depth
end
# --- Core Macro Logic ---
defmacro __using__(_opts) do
quote do
import Kernel, except: [def: 1, def: 2, defp: 1, defp: 2]
import Tdd.Debug
require Tdd.Debug
end
end
@doc false
defmacro def(call, clauses \\ Keyword.new()) do
generate_traced_function(:def, call, clauses, __CALLER__)
end
@doc false
defmacro defp(call, clauses \\ Keyword.new()) do
generate_traced_function(:defp, call, clauses, __CALLER__)
end
defp is_simple_variable_ast?(ast_node) do
case ast_node do
# A variable is a 3-tuple whose third element (the context) is an atom or
# nil; call nodes carry a list of arguments there instead.
{var_name, _meta, context} when is_atom(var_name) and is_atom(context) ->
var_name != :_
_ -> false
end
end
defp generate_traced_function(type, call_ast, clauses, caller_env) do
require Macro # Good practice
{function_name_ast, meta_call, original_args_patterns_ast_nullable} = call_ast
original_args_patterns_ast_list = original_args_patterns_ast_nullable || []
original_body_ast =
(if Keyword.keyword?(clauses) do
Keyword.get(clauses, :do, clauses)
else
clauses
end) || quote(do: nil)
mapped_and_generated_vars_tuples =
Enum.map(Enum.with_index(original_args_patterns_ast_list), fn {original_pattern_ast, index} ->
# __td_arg_N__ is for logging, make it hygienic with `nil` context (or __MODULE__)
td_arg_var = Macro.var(String.to_atom("__td_arg_#{index}__"), nil)
{final_pattern_for_head, rhs_for_td_arg_assignment} =
case original_pattern_ast do
# 1. Ignored variable: `_`
# AST: {:_, meta, context_module_or_nil}
{:_, _, _} = underscore_ast ->
{underscore_ast, quote(do: :__td_ignored_argument__)}
# 2. Assignment pattern: `var = pattern` or `var = _`
# AST: {:=, meta, [lhs, rhs_of_assign]}
{:=, _meta_assign, [lhs_of_assign, _rhs_of_assign]} = assignment_pattern_ast ->
if is_simple_variable_ast?(lhs_of_assign) do
{assignment_pattern_ast, lhs_of_assign} # Head uses `var = pattern`, log `var`
else
# LHS is complex (e.g., `%{key: v} = pattern`), capture the whole value.
captured_val_var = Macro.unique_var(String.to_atom("tdc_assign_#{index}"), Elixir)
new_head_pattern = quote do unquote(captured_val_var) = unquote(assignment_pattern_ast) end
{new_head_pattern, captured_val_var}
end
# 3. Default argument: `pattern_before_default \\ default_value`
# AST: {:\\, meta, [pattern_before_default, default_value_ast]}
{:\\, _meta_default, [pattern_before_default, _default_value_ast]} = default_arg_pattern_ast ->
cond do
# 3a. `var \\ default`
is_simple_variable_ast?(pattern_before_default) ->
{default_arg_pattern_ast, pattern_before_default}
# 3b. `(var = inner_pattern) \\ default`
match?({:=, _, [_, _]}, pattern_before_default) and
is_simple_variable_ast?(pattern_before_default |> elem(2) |> Enum.at(0)) ->
# `lhs_inner_assign` is the var on the left of `=`
{:=, _, [lhs_inner_assign, _]} = pattern_before_default
{default_arg_pattern_ast, lhs_inner_assign}
# 3c. `(complex_pattern) \\ default` or `(_ = inner_pattern) \\ default` etc.
true ->
captured_val_var = Macro.unique_var(String.to_atom("tdc_def_#{index}"), Elixir)
new_head_pattern = quote do unquote(captured_val_var) = unquote(default_arg_pattern_ast) end
{new_head_pattern, captured_val_var}
end
# 4. Simple variable `var` (checked using our helper)
# or other complex patterns/literals not caught above.
ast_node ->
if is_simple_variable_ast?(ast_node) do
{ast_node, ast_node} # Head uses `var`, log `var`
else
# It's a complex pattern (e.g., `%{a:x}`, `[h|t]`) or a literal not assignable to.
captured_val_var = Macro.unique_var(String.to_atom("tdc_pat_#{index}"), Elixir)
new_head_pattern = quote do unquote(captured_val_var) = unquote(ast_node) end
{new_head_pattern, captured_val_var}
end
end
assignment_ast = quote do unquote(td_arg_var) = unquote(rhs_for_td_arg_assignment) end
{final_pattern_for_head, assignment_ast, td_arg_var}
end)
{new_args_patterns_for_head_list, assignments_for_logging_vars_ast_list, generated_vars_to_log_asts} =
Enum.reduce(Enum.reverse(mapped_and_generated_vars_tuples), {[], [], []}, fn
{head, assign, var}, {heads, assigns, vars} -> {[head | heads], [assign | assigns], [var | vars]}
end)
new_call_ast = {function_name_ast, meta_call, new_args_patterns_for_head_list}
traced_body_inner_ast =
quote do
unquote_splicing(assignments_for_logging_vars_ast_list)
if Tdd.Debug.is_pid_traced?(self()) do
current_print_depth = Tdd.Debug.increment_depth()
indent = String.duplicate(" ", current_print_depth - 1)
runtime_arg_values = [unquote_splicing(generated_vars_to_log_asts)]
actual_module_name_str = Atom.to_string(unquote(caller_env.module))
# The function_name_ast is resolved at macro expansion time.
# If it's `def foo(...)`, `unquote(function_name_ast)` becomes `:foo`.
# If `def unquote(name_var)(...)`, it resolves `name_var`.
resolved_fn_name = unquote(function_name_ast)
printable_function_name_str =
if is_atom(resolved_fn_name) do
Atom.to_string(resolved_fn_name)
else
Macro.to_string(resolved_fn_name) # For complex names / operators if AST passed
end
IO.puts(
"#{indent}CALL: #{actual_module_name_str}.#{printable_function_name_str}"
)
IO.puts(
"#{indent} ARGS: #{inspect(runtime_arg_values)}"
)
try do
result = unquote(original_body_ast)
_ = Tdd.Debug.decrement_depth()
IO.puts(
"#{indent}RETURN from #{actual_module_name_str}.#{printable_function_name_str}: #{inspect(result)}"
)
result
rescue
error_instance ->
stacktrace = __STACKTRACE__
_ = Tdd.Debug.decrement_depth()
IO.puts(
"#{indent}ERROR in #{actual_module_name_str}.#{printable_function_name_str}: #{inspect(error_instance)}"
)
reraise error_instance, stacktrace
end
else
unquote(original_body_ast)
end
end
final_definition_ast =
quote location: :keep do
Kernel.unquote(type)(
unquote(new_call_ast),
do: unquote(traced_body_inner_ast)
)
end
final_definition_ast
end
# --- TDD Graph Printing (Kept as it was, not directly related to call tracing simplification) ---
@doc "Prints a formatted representation of a TDD graph structure."
def print_tdd_graph(id, store_module \\ Tdd.Store) do
IO.puts("--- TDD Graph (ID: #{id}) ---")
do_print_tdd_node(id, 0, MapSet.new(), store_module)
IO.puts("------------------------")
end
defp do_print_tdd_node(id, indent_level, visited, store_module) do
prefix = String.duplicate(" ", indent_level)
if MapSet.member?(visited, id) do
IO.puts("#{prefix}ID #{id} -> [Seen, recursive link]")
:ok
else
new_visited = MapSet.put(visited, id)
case store_module.get_node(id) do # Assumes store_module.get_node/1 exists
{:ok, :true_terminal} -> IO.puts("#{prefix}ID #{id} -> TRUE")
{:ok, :false_terminal} -> IO.puts("#{prefix}ID #{id} -> FALSE")
{:ok, {var, y_id, n_id, dc_id}} ->
IO.puts("#{prefix}ID #{id}: IF #{inspect(var)}")
IO.puts("#{prefix} ├─ Yes (to ID #{y_id}):")
do_print_tdd_node(y_id, indent_level + 2, new_visited, store_module)
IO.puts("#{prefix} ├─ No (to ID #{n_id}):")
do_print_tdd_node(n_id, indent_level + 2, new_visited, store_module)
IO.puts("#{prefix} └─ DC (to ID #{dc_id}):")
do_print_tdd_node(dc_id, indent_level + 2, new_visited, store_module)
{:error, reason} ->
IO.puts("#{prefix}ID #{id}: ERROR - #{reason}")
end
end
end
end
defmodule Tdd.TypeSpec do
@moduledoc """
Defines the `TypeSpec` structure and functions for its manipulation.
A `TypeSpec` is a stable, structural, and declarative representation of a type.
It serves as the primary language for defining and interacting with types in the
higher-level system, abstracting away the underlying TDD implementation.
All `TypeSpec`s should be passed through `normalize/1` before being used in
caching or compilation to ensure a canonical representation.
"""
@typedoc "A stable, structural representation of a type."
# --- Core Types ---
@type t ::
:any
| :none
| :atom
| :integer
| :list
| :tuple
# (Add :binary, :function, :pid, etc. here as they are implemented)
# --- Literal Value Type ---
| {:literal, term()}
# --- Set-Theoretic Combinators ---
# Note: The `normalize/1` function guarantees that the lists in
# :union and :intersect are sorted, unique, and flattened.
| {:union, [t()]}
| {:intersect, [t()]}
| {:negation, t()}
# --- Parameterized Structural Types ---
| {:tuple, [t()]}
| {:cons, head :: t(), tail :: t()}
| {:list_of, element :: t()}
# --- Integer Range (Example of property-based type) ---
| {:integer_range, min :: integer() | :neg_inf, max :: integer() | :pos_inf}
# --- For Polymorphism (Future Use) ---
| {:type_var, atom()}
@doc """
Converts a `TypeSpec` into its canonical (normalized) form.
Normalization is crucial for reliable caching and simplifying downstream logic.
It performs several key operations:
1. **Flattens nested unions and intersections:**
`{:union, [A, {:union, [B, C]}]}` becomes `{:union, [A, B, C]}`.
2. **Sorts and uniqs members of unions and intersections:**
`{:union, [C, A, A]}` becomes `{:union, [A, C]}`.
3. **Applies logical simplification rules (idempotency, annihilation):**
- `A | A` -> `A`
- `A | none` -> `A`
- `A & any` -> `A`
- `A & none` -> `none`
- `A | any` -> `any`
- `¬(¬A)` -> `A`
- An intersection containing both `A` and `¬A` simplifies. (This is
better handled by the TDD compiler, but basic checks can happen here).
4. **Recursively normalizes all sub-specs.**
"""
@spec normalize(t()) :: t()
def normalize(spec) do
case spec do
# Base cases are unchanged
s when is_atom(s) -> s
{:literal, _} -> spec
{:type_var, _} -> spec
# Recursive cases now call helper functions
{:negation, sub_spec} -> normalize_negation(sub_spec)
{:tuple, elements} -> {:tuple, Enum.map(elements, &normalize/1)}
{:cons, head, tail} -> {:cons, normalize(head), normalize(tail)}
{:list_of, element} -> {:list_of, normalize(element)}
# A new rule for integer ranges
{:integer_range, min, max} -> normalize_integer_range(min, max)
{:union, members} -> normalize_union(members)
{:intersect, members} -> normalize_intersection(members)
end
end
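# Illustrative expectations for the rules above (not exhaustive):
#
#     normalize({:union, [:atom, {:union, [:integer, :atom]}]})
#     # => {:union, [:atom, :integer]}   (flattened, deduplicated, sorted)
#
#     normalize({:intersect, [:integer, :none]})
#     # => :none                         (A & none -> none)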
# ------------------------------------------------------------------
# Private Normalization Helpers
# ------------------------------------------------------------------
defp normalize_negation(sub_spec) do
normalized_sub = normalize(sub_spec)
case normalized_sub do
# ¬(¬A) -> A
{:negation, inner_spec} -> inner_spec
:any -> :none
:none -> :any
_ -> {:negation, normalized_sub}
end
end
defp normalize_integer_range(min, max) do
# An invalid range simplifies to `none`.
if is_integer(min) and is_integer(max) and min > max do
:none
else
# An intersection with :integer is implied. Wrapping the range as
# {:intersect, [:integer, {:integer_range, min, max}]} would make that
# explicit in the canonical form; for now we return the bare range.
# TODO: revisit
{:integer_range, min, max}
end
end
defp normalize_union(members) do
# 1. Recursively normalize and flatten members
normalized_and_flattened =
Enum.flat_map(members, fn member ->
normalized = normalize(member)
case normalized do
{:union, sub_members} -> sub_members
_ -> [normalized]
end
end)
# 2. Apply simplification rules
simplified_members =
normalized_and_flattened
# A | none -> A
|> Enum.reject(&(&1 == :none))
|> MapSet.new()
if MapSet.member?(simplified_members, :any) do
# A | any -> any
:any
else
# 3. NEW: Reduce by removing subtypes
# If we have {A, B} and A <: B, the union is just B. So we keep only supersets.
# We achieve this by removing any member that is a subtype of another member.
reduced_members =
Enum.reject(simplified_members, fn member_to_check ->
Enum.any?(simplified_members, fn other_member ->
member_to_check != other_member and is_subtype?(member_to_check, other_member)
end)
end)
# 4. Finalize the structure
case reduced_members do
[] -> :none
[single_member] -> single_member
list -> {:union, Enum.sort(list)}
end
end
end
defp normalize_intersection(members) do
# IO.inspect("Normalize intersection")
# 1. Recursively normalize and flatten, but also add implied supertypes
normalized_and_flattened =
Enum.flat_map(members, fn member ->
# IO.inspect(member, label: "normalize member")
normalized = normalize(member)
# Expand a type into itself and its implied supertypes
# e.g., `:foo` becomes `[:foo, :atom]`
expanded =
case normalized do
{:intersect, sub_members} -> sub_members
_ -> get_supertypes(normalized)
end
expanded
end)
# 2. Apply simplification rules
simplified_members =
normalized_and_flattened
# A & any -> A
|> Enum.reject(&(&1 == :any))
|> MapSet.new()
if MapSet.member?(simplified_members, :none) do
# A & none -> none
:none
else
# 3. NEW: Reduce by removing supertypes
# IO.inspect("Reduce by removing supertypes")
# If we have {A, B} and A <: B, the intersection is just A. So we keep only subsets.
# We achieve this by removing any member for which a proper subtype exists in the set.
reduced_members =
Enum.reject(simplified_members, fn member_to_check ->
Enum.any?(simplified_members, fn other_member ->
member_to_check != other_member and is_subtype?(other_member, member_to_check)
end)
end)
# 4. Finalize the structure
# IO.inspect("4. Finalize the structure")
case reduced_members do
[] -> :any
[single_member] -> single_member
list -> {:intersect, Enum.sort(list)}
end
end
end
# ------------------------------------------------------------------
# Private Semantic Helpers
# ------------------------------------------------------------------
@doc """
A preliminary, non-TDD check if `spec1` is a subtype of `spec2`.
This check is not exhaustive but covers many common, structural cases,
allowing for significant simplification at the `TypeSpec` level.
"""
@spec is_subtype?(t(), t()) :: boolean
def is_subtype?(spec1, spec2) do
# Avoid infinite recursion by not re-normalizing.
# The callers are assumed to be working with normalized data.
# Base cases are handled first for efficiency.
cond do
spec1 == spec2 -> true
spec1 == :none -> true
spec2 == :any -> true
spec1 == :any or spec2 == :none -> false
# Defer to pattern-matching helper
true -> do_is_subtype?(spec1, spec2)
end
end
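# A few illustrative checks, following the rules implemented below:
#
#     is_subtype?({:literal, :ok}, :atom)            # => true
#     is_subtype?({:integer_range, 1, 5}, :integer)  # => true
#     is_subtype?(:atom, {:literal, :ok})            # => false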
# Private helper that uses `case` for proper pattern matching.
defp do_is_subtype?(spec1, spec2) do
case {spec1, spec2} do
# --- Set-theoretic rules ---
# (A | B) <: C if A <: C and B <: C
{{:union, members1}, _} ->
Enum.all?(members1, &is_subtype?(&1, spec2))
# A <: (B | C) if A <: B or A <: C
{_, {:union, members2}} ->
Enum.any?(members2, &is_subtype?(spec1, &1))
# (A & B) <: C if A <: C or B <: C
{{:intersect, members1}, _} ->
Enum.any?(members1, &is_subtype?(&1, spec2))
# A <: (B & C) if A <: B and A <: C
{_, {:intersect, members2}} ->
Enum.all?(members2, &is_subtype?(spec1, &1))
# --- Literal and Base Type Rules ---
{s1, s2} when is_atom(s1) and is_atom(s2) ->
s1 == s2
{{:literal, v1}, {:literal, v2}} ->
v1 == v2
{{:literal, val}, :atom} when is_atom(val) ->
true
{{:literal, val}, :integer} when is_integer(val) ->
true
{{:literal, val}, :list} when is_list(val) ->
true
{{:literal, val}, :tuple} when is_tuple(val) ->
true
# --- List and Cons Rules ---
{{:literal, []}, :list} ->
true
{{:cons, _, _}, :list} ->
true
{{:list_of, _}, :list} ->
true
{{:cons, h1, t1}, {:cons, h2, t2}} ->
is_subtype?(h1, h2) and is_subtype?(t1, t2)
{{:list_of, e1}, {:list_of, e2}} ->
is_subtype?(e1, e2)
{{:cons, h, t}, {:list_of, x}} ->
is_subtype?(h, x) and is_subtype?(t, {:list_of, x})
{{:literal, []}, {:list_of, _}} ->
true
# --- Tuple Rules ---
{{:literal, {}}, :tuple} ->
true
{{:tuple, _}, :tuple} ->
true
{{:tuple, elems1}, {:tuple, elems2}} when length(elems1) == length(elems2) ->
Enum.zip_with(elems1, elems2, &is_subtype?/2) |> Enum.all?()
# --- Integer Range Rules ---
{{:integer_range, _, _}, :integer} ->
true
{{:integer_range, min1, max1}, {:integer_range, min2, max2}} ->
min1_gte_min2 = if min1 == :neg_inf, do: true, else: min2 != :neg_inf and min1 >= min2
max1_lte_max2 = if max1 == :pos_inf, do: true, else: max2 != :pos_inf and max1 <= max2
min1_gte_min2 and max1_lte_max2
{{:literal, val}, {:integer_range, min, max}} when is_integer(val) ->
(min == :neg_inf or val >= min) and (max == :pos_inf or val <= max)
# --- Default fallback ---
# If no specific rule matches, they are not considered subtypes.
{_, _} ->
false
end
end
# Gets a list of immediate, known supertypes for a given simple spec.
defp get_supertypes(spec) do
supertypes =
case spec do
{:literal, val} when is_atom(val) -> [:atom]
{:literal, val} when is_integer(val) -> [:integer]
{:literal, val} when is_list(val) -> [:list]
{:literal, val} when is_tuple(val) -> [:tuple]
{:cons, _, _} -> [:list]
{:list_of, _} -> [:list]
{:tuple, _} -> [:tuple]
{:integer_range, _, _} -> [:integer]
_ -> []
end
# Use a MapSet to ensure the spec and its supertypes are unique.
MapSet.to_list(MapSet.new([spec | supertypes]))
end
end
defmodule Tdd.Store do
use Tdd.Debug
@moduledoc """
Manages the state of the TDD system's node graph and operation cache.
This module acts as the stateful backend for the TDD algorithms. It is
responsible for creating unique, shared nodes (ensuring structural canonicity)
and for memoizing the results of expensive operations.
It is intentionally agnostic about the *meaning* of the variables within the
nodes; it treats them as opaque, comparable terms. The logic for interpreting
these variables resides in higher-level modules like `Tdd.Algo` and
`Tdd.Consistency.Engine`.
For simplicity, this implementation uses the Process dictionary for state.
In a production, concurrent application, this would be replaced by a `GenServer`
to ensure safe, serialized access to the shared TDD state.
"""
# --- State Keys ---
@nodes_key :tdd_nodes
@node_by_id_key :tdd_node_by_id
@next_id_key :tdd_next_id
@op_cache_key :tdd_op_cache
# --- Terminal Node IDs ---
@false_node_id 0
@true_node_id 1
# --- Public API ---
@doc "Initializes the TDD store in the current process."
def init do
# The main lookup table: {variable, y, n, d} -> id
Process.put(@nodes_key, %{})
# The reverse lookup table: id -> {variable, y, n, d} or :terminal
Process.put(@node_by_id_key, %{
@false_node_id => :false_terminal,
@true_node_id => :true_terminal
})
# The next available integer ID for a new node.
Process.put(@next_id_key, 2)
# The cache for memoizing operation results: {op, args} -> id
Process.put(@op_cache_key, %{})
:ok
end
@doc "Returns the ID for the TRUE terminal node (the 'any' type)."
@spec true_node_id() :: non_neg_integer()
def true_node_id, do: @true_node_id
@doc "Returns the ID for the FALSE terminal node (the 'none' type)."
@spec false_node_id() :: non_neg_integer()
def false_node_id, do: @false_node_id
@doc "Retrieves the details of a node by its ID."
@spec get_node(non_neg_integer()) ::
{:ok,
{variable :: term(), yes_id :: non_neg_integer(), no_id :: non_neg_integer(),
dc_id :: non_neg_integer()}}
| {:ok, :true_terminal | :false_terminal}
| {:error, :not_found}
def get_node(id) do
case Process.get(@node_by_id_key, %{}) do
%{^id => details} -> {:ok, details}
%{} -> {:error, :not_found}
end
end
@doc """
Finds an existing node that matches the structure or creates a new one.
This is the core function for ensuring structural sharing (part of the "Reduced"
property of ROBDDs). It also implements a fundamental reduction rule: if all
children of a node are identical, the node is redundant and is replaced by
its child.
"""
@spec find_or_create_node(
variable :: term(),
yes_id :: non_neg_integer(),
no_id :: non_neg_integer(),
dc_id :: non_neg_integer()
) :: non_neg_integer()
def find_or_create_node(variable, yes_id, no_id, dc_id) do
# Basic reduction rule: a node whose test is irrelevant is redundant.
if yes_id == no_id && yes_id == dc_id do
yes_id
else
node_tuple = {variable, yes_id, no_id, dc_id}
nodes = Process.get(@nodes_key, %{})
case Map.get(nodes, node_tuple) do
# Node already exists, return its ID for structural sharing.
id when is_integer(id) ->
id
# Node does not exist, create it.
nil ->
next_id = Process.get(@next_id_key)
node_by_id = Process.get(@node_by_id_key)
# Update all state tables
Process.put(@nodes_key, Map.put(nodes, node_tuple, next_id))
Process.put(@node_by_id_key, Map.put(node_by_id, next_id, node_tuple))
Process.put(@next_id_key, next_id + 1)
next_id
end
end
end
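# Sketch of the structural-sharing guarantee, assuming `init/0` was called in
# the current process and `var` stands for any predicate variable:
#
#     id1 = Tdd.Store.find_or_create_node(var, 1, 0, 0)
#     id2 = Tdd.Store.find_or_create_node(var, 1, 0, 0)
#     id1 == id2                                   # => true (node created once, then shared)
#     Tdd.Store.find_or_create_node(var, 1, 1, 1)  # => 1 (redundant test collapses to its child)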
@doc "Retrieves a result from the operation cache."
@spec get_op_cache(term()) :: {:ok, term()} | :not_found
def get_op_cache(cache_key) do
case Process.get(@op_cache_key, %{}) do
%{^cache_key => result} -> {:ok, result}
%{} -> :not_found
end
end
@doc "Puts a result into the operation cache."
@spec put_op_cache(term(), term()) :: :ok
def put_op_cache(cache_key, result) do
# Using `get_and_update_in` would be safer but this is fine for this context.
cache = Process.get(@op_cache_key, %{})
Process.put(@op_cache_key, Map.put(cache, cache_key, result))
:ok
end
@doc """
Creates a unique, temporary placeholder node for a recursive spec.
Returns the ID of this placeholder.
"""
@spec create_placeholder(Tdd.TypeSpec.t()) :: non_neg_integer()
def create_placeholder(spec) do
# The variable is a unique tuple that won't conflict with real predicates.
# The children are meaningless (-1) as they will be replaced.
find_or_create_node({:placeholder, spec}, -1, -1, -1)
end
@doc """
Updates a node's details directly. This is a special-purpose, mutable-style
operation used exclusively by the compiler to "tie the knot" for recursive types.
It updates both the forward and reverse lookup tables.
"""
@spec update_node_in_place(
non_neg_integer(),
new_details ::
{:ok,
{term(), non_neg_integer(), non_neg_integer(), non_neg_integer()}
| :true_terminal
| :false_terminal}
) :: :ok
def update_node_in_place(id, {:ok, new_details}) do
# Get current state
nodes = Process.get(@nodes_key)
node_by_id = Process.get(@node_by_id_key)
# 1. Find and remove the old reverse mapping from the `nodes` table.
# The node at `id` is a placeholder, so its details are what we created above.
old_details = Map.get(node_by_id, id)
nodes = Map.delete(nodes, old_details)
# 2. Add the new reverse mapping to the `nodes` table.
# But only if the new details correspond to a non-terminal node.
nodes =
case new_details do
{_v, _y, _n, _d} -> Map.put(nodes, new_details, id)
_ -> nodes
end
# 3. Update the main `node_by_id` table.
node_by_id = Map.put(node_by_id, id, new_details)
# 4. Save the updated tables.
Process.put(@nodes_key, nodes)
Process.put(@node_by_id_key, node_by_id)
:ok
end
end
defmodule Tdd.Variable do
use Tdd.Debug
@moduledoc """
Defines the canonical structure for all Tdd predicate variables.
The structure `{category, predicate, value, padding}` is used to enforce a
stable global ordering. All variables are 4-element tuples to ensure that
Elixir's tuple-size-first comparison rule does not interfere with the
intended predicate ordering within a category.
"""
alias Tdd.TypeSpec
# --- Category 0: Primary Type Discriminators ---
# Padding with `nil` to make them 4-element tuples.
@spec v_is_atom() :: term()
def v_is_atom, do: {0, :is_atom, nil, nil}
@spec v_is_integer() :: term()
def v_is_integer, do: {0, :is_integer, nil, nil}
@spec v_is_list() :: term()
def v_is_list, do: {0, :is_list, nil, nil}
@spec v_is_tuple() :: term()
def v_is_tuple, do: {0, :is_tuple, nil, nil}
# --- Category 1: Atom Properties ---
@spec v_atom_eq(atom()) :: term()
def v_atom_eq(atom_val) when is_atom(atom_val), do: {1, :value, atom_val, nil}
# --- Category 2: Integer Properties ---
@spec v_int_lt(integer()) :: term()
def v_int_lt(n) when is_integer(n), do: {2, :alt, n, nil}
@spec v_int_eq(integer()) :: term()
def v_int_eq(n) when is_integer(n), do: {2, :beq, n, nil}
@spec v_int_gt(integer()) :: term()
def v_int_gt(n) when is_integer(n), do: {2, :cgt, n, nil}
# --- Category 4: Tuple Properties ---
# The most complex var here is `:b_element` with index and nested var.
# So all vars in this category need to be at least 4-element.
@spec v_tuple_size_eq(non_neg_integer()) :: term()
def v_tuple_size_eq(size) when is_integer(size) and size >= 0, do: {4, :a_size, size, nil}
@spec v_tuple_elem_pred(non_neg_integer(), term()) :: term()
def v_tuple_elem_pred(index, nested_pred_var) when is_integer(index) and index >= 0 do
{4, :b_element, index, nested_pred_var}
end
# --- Category 5: List Properties ---
# All are now 4-element tuples. The sorting will be correct.
# @spec v_list_all_elements_are(TypeSpec.t()) :: term()
# def v_list_all_elements_are(element_spec), do: {5, :a_all_elements, element_spec, nil}
@doc "Predicate: The list is the empty list `[]`."
@spec v_list_is_empty() :: term()
def v_list_is_empty, do: {5, :b_is_empty, nil, nil}
@doc "Predicate: Applies a nested predicate to the head of a non-empty list."
@spec v_list_head_pred(term()) :: term()
def v_list_head_pred(nested_pred_var), do: {5, :c_head, nested_pred_var, nil}
@doc "Predicate: Applies a nested predicate to the tail of a non-empty list."
@spec v_list_tail_pred(term()) :: term()
def v_list_tail_pred(nested_pred_var), do: {5, :d_tail, nested_pred_var, nil}
end
defmodule Tdd.Predicate.Info do
@moduledoc "A knowledge base for the properties of TDD predicate variables."
alias Tdd.Variable
@doc "Returns a map of traits for a given predicate variable."
@spec get_traits(term()) :: map() | nil
# --- THIS IS THE FIX ---
# Add explicit negative implications for primary types. When a value is proven
# to be of one primary type, it is implicitly proven NOT to be of the others.
def get_traits({0, :is_atom, _, _}) do
%{
type: :primary,
category: :atom,
implies: [
{Variable.v_is_integer(), false},
{Variable.v_is_list(), false},
{Variable.v_is_tuple(), false}
]
}
end
def get_traits({0, :is_integer, _, _}) do
%{
type: :primary,
category: :integer,
implies: [
{Variable.v_is_atom(), false},
{Variable.v_is_list(), false},
{Variable.v_is_tuple(), false}
]
}
end
def get_traits({0, :is_list, _, _}) do
%{
type: :primary,
category: :list,
implies: [
{Variable.v_is_atom(), false},
{Variable.v_is_integer(), false},
{Variable.v_is_tuple(), false}
]
}
end
def get_traits({0, :is_tuple, _, _}) do
%{
type: :primary,
category: :tuple,
implies: [
{Variable.v_is_atom(), false},
{Variable.v_is_integer(), false},
{Variable.v_is_list(), false}
]
}
end
# --- The rest of the module is unchanged ---
def get_traits({1, :value, _val, _}) do
%{type: :atom_value, category: :atom, implies: [{Variable.v_is_atom(), true}]}
end
def get_traits({2, :alt, _, _}),
do: %{type: :integer_prop, category: :integer, implies: [{Variable.v_is_integer(), true}]}
def get_traits({2, :beq, _, _}),
do: %{type: :integer_prop, category: :integer, implies: [{Variable.v_is_integer(), true}]}
def get_traits({2, :cgt, _, _}),
do: %{type: :integer_prop, category: :integer, implies: [{Variable.v_is_integer(), true}]}
def get_traits({4, :a_size, _, _}) do
%{type: :tuple_prop, category: :tuple, implies: [{Variable.v_is_tuple(), true}]}
end
def get_traits({4, :b_element, index, _nested_var}) do
%{
type: :tuple_recursive,
category: :tuple,
sub_key: {:elem, index},
implies: [{Variable.v_is_tuple(), true}]
}
end
# def get_traits({5, :a_all_elements, element_spec, _}) do
# %{
# type: :list_recursive_ambient,
# category: :list,
# ambient_constraint_spec: element_spec,
# implies: [{Variable.v_is_list(), true}]
# }
# end
def get_traits({5, :b_is_empty, _, _}) do
%{type: :list_prop, category: :list, implies: [{Variable.v_is_list(), true}]}
end
def get_traits({5, :c_head, _nested_var, _}) do
%{
type: :list_recursive,
category: :list,
sub_key: :head,
implies: [{Variable.v_is_list(), true}, {Variable.v_list_is_empty(), false}]
}
end
def get_traits({5, :d_tail, _nested_var, _}) do
%{
type: :list_recursive,
category: :list,
sub_key: :tail,
implies: [{Variable.v_is_list(), true}, {Variable.v_list_is_empty(), false}]
}
end
def get_traits(_), do: nil
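# For example, the primary-type predicate for atoms carries negative
# implications for the other primary types:
#
#     get_traits(Variable.v_is_atom())
#     # => %{type: :primary, category: :atom,
#     #      implies: [{Variable.v_is_integer(), false}, ...]}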
end
# in a new file, e.g., lib/tdd/consistency/engine.ex
defmodule Tdd.Consistency.Engine do
use Tdd.Debug
@moduledoc """
A rule-based engine for checking the semantic consistency of a set of assumptions.
This engine is the "oracle" for the `Tdd.Algo.simplify/2` function. It takes
a set of assumptions about predicate variables (e.g., `{is_atom, true}`,
`{value == :foo, true}`) and determines if that set is logically consistent.
The check is performed recursively. At each level, it:
1. **Expands** assumptions with their local implications (e.g. `value == :foo` implies `is_atom`).
2. **Checks for flat contradictions** at the current level (e.g., `is_atom` and `is_integer`).
3. **Groups** assumptions by sub-problem (e.g., all `head(...)` predicates).
4. **Recursively calls** the checker on each sub-problem with unwrapped predicates.
"""
alias Tdd.Predicate.Info
alias Tdd.Variable
@doc """
Checks if a map of assumptions is logically consistent.
Returns `:consistent` or `:contradiction`.
"""
@spec check(map()) :: :consistent | :contradiction
def check(assumptions) do
# The main entry point now calls the recursive helper.
# The logic from the old `check/1` is now inside `do_check/1`.
do_check(assumptions)
end
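# Sketch: a single primary type is consistent, while two mutually exclusive
# primary types contradict (via the implication expansion below):
#
#     check(%{Variable.v_is_atom() => true})
#     # => :consistent
#     check(%{Variable.v_is_atom() => true, Variable.v_is_integer() => true})
#     # => :contradiction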
@doc """
Expands a map of assumptions with all their logical implications.
Returns `{:ok, expanded_map}` or `{:error, :contradiction}`.
"""
@spec expand(map()) :: {:ok, map()} | {:error, :contradiction}
def expand(assumptions) do
# Just expose the internal helper.
expand_with_implications(assumptions)
end
# --- The Core Recursive Checker ---
defp do_check(assumptions) do
# 1. Expand assumptions with immediate, local implications.
with {:ok, expanded} <- expand_with_implications(assumptions),
# 2. Check for contradictions among the flat predicates at this level.
:ok <- check_flat_consistency(expanded) do
# 3. Group the expanded assumptions into sub-problems based on their scope.
sub_problems =
expanded
|> Enum.group_by(fn {var, _val} -> (Info.get_traits(var) || %{})[:sub_key] end)
# Drop top-level keys, we only want recursive sub-problems.
|> Map.drop([nil])
# 4. If there are no sub-problems, and we passed the flat check, we're consistent.
if map_size(sub_problems) == 0 do
:consistent
else
# 5. Recursively check each sub-problem. Stop at the first contradiction.
Enum.find_value(sub_problems, :consistent, fn {_sub_key, sub_assumptions_list} ->
# Unwrap the variables for the recursive call
# e.g., {{5, :c_head, NESTED_VAR, _}, val} -> {NESTED_VAR, val}
remapped_assumptions = remap_sub_problem_vars(sub_assumptions_list)
# If any sub-problem is a contradiction, the whole set is.
case do_check(remapped_assumptions) do
# Not a contradiction, continue checking others.
:consistent -> nil
# Contradiction found, halt and return.
:contradiction -> :contradiction
end
end)
end
else
# `expand_with_implications` or `check_flat_consistency` failed.
{:error, _reason} -> :contradiction
end
end
# --- Recursive Checking Helpers ---
@doc "Converts a list of scoped assumptions into a map of base assumptions for a sub-problem."
defp remap_sub_problem_vars(assumptions_list) do
Map.new(assumptions_list, fn {var, val} ->
{unwrap_var(var), val}
end)
end
@doc "Extracts the inner predicate variable from a recursive variable."
defp unwrap_var(var) do
case var do
{4, :b_element, _index, nested_pred_var} -> nested_pred_var
{5, :c_head, nested_pred_var, _} -> nested_pred_var
{5, :d_tail, nested_pred_var, _} -> nested_pred_var
# This function is only called on vars that are known to be recursive,
# so other cases are not expected.
other -> other
end
end
# --- Step 1: Implication Expansion (Unchanged) ---
defp expand_with_implications(assumptions) do
expand_loop(assumptions, assumptions)
end
defp expand_loop(new_assumptions, all_assumptions) do
implications =
Enum.flat_map(new_assumptions, fn
{var, true} -> Map.get(Info.get_traits(var) || %{}, :implies, [])
_ -> []
end)
case Enum.reduce(implications, {:ok, %{}}, fn {implied_var, implied_val}, acc ->
reduce_implication({implied_var, implied_val}, all_assumptions, acc)
end) do
{:error, :contradiction} = err ->
err
{:ok, newly_added} when map_size(newly_added) == 0 ->
{:ok, all_assumptions}
{:ok, newly_added} ->
expand_loop(newly_added, Map.merge(all_assumptions, newly_added))
end
end
defp reduce_implication({var, val}, all_assumptions, {:ok, new_acc}) do
case Map.get(all_assumptions, var) do
nil -> {:ok, Map.put(new_acc, var, val)}
^val -> {:ok, new_acc}
_other_val -> {:error, :contradiction}
end
end
defp reduce_implication(_implication, _all_assumptions, error_acc), do: error_acc
# --- Step 2: Flat Consistency Checks (Unchanged) ---
defp check_flat_consistency(assumptions) do
with :ok <- check_primary_type_exclusivity(assumptions),
:ok <- check_atom_consistency(assumptions),
:ok <- check_list_consistency(assumptions),
:ok <- check_integer_consistency(assumptions),
:ok <- check_tuple_consistency(assumptions) do
:ok
else
:error -> {:error, :consistency_error}
end
end
defp check_primary_type_exclusivity(assumptions) do
primary_types = [
Variable.v_is_atom(),
Variable.v_is_integer(),
Variable.v_is_list(),
Variable.v_is_tuple()
]
true_primary_types = Enum.count(primary_types, &(Map.get(assumptions, &1) == true))
if true_primary_types > 1, do: :error, else: :ok
end
defp check_atom_consistency(assumptions) do
true_atom_values =
Enum.reduce(assumptions, MapSet.new(), fn
{{1, :value, atom_val, _}, true}, acc -> MapSet.put(acc, atom_val)
_, acc -> acc
end)
if MapSet.size(true_atom_values) > 1, do: :error, else: :ok
end
defp check_tuple_consistency(assumptions) do
true_tuple_sizes =
Enum.reduce(assumptions, MapSet.new(), fn
{{4, :a_size, size, _}, true}, acc -> MapSet.put(acc, size)
_, acc -> acc
end)
if MapSet.size(true_tuple_sizes) > 1, do: :error, else: :ok
end
defp check_list_consistency(assumptions) do
is_empty = Map.get(assumptions, Variable.v_list_is_empty()) == true
has_head_prop = Enum.any?(assumptions, &match?({{5, :c_head, _, _}, true}, &1))
has_tail_prop = Enum.any?(assumptions, &match?({{5, :d_tail, _, _}, true}, &1))
if is_empty and (has_head_prop or has_tail_prop), do: :error, else: :ok
end
defp check_integer_consistency(assumptions) do
initial_range = {:neg_inf, :pos_inf}
result =
Enum.reduce_while(assumptions, initial_range, fn assumption, {min, max} ->
case assumption do
{{2, :alt, n, _}, true} -> narrow_range(min, safe_min(max, n - 1))
{{2, :alt, n, _}, false} -> narrow_range(safe_max(min, n), max)
{{2, :beq, n, _}, true} -> narrow_range(safe_max(min, n), safe_min(max, n))
{{2, :beq, n, _}, false} when min == n and max == n -> {:halt, :invalid}
{{2, :cgt, n, _}, true} -> narrow_range(safe_max(min, n + 1), max)
{{2, :cgt, n, _}, false} -> narrow_range(min, safe_min(max, n))
_ -> {:cont, {min, max}}
end
end)
case result do
:invalid -> :error
_ -> :ok
end
end
defp narrow_range(min, max) do
is_invalid =
case {min, max} do
{:neg_inf, _} -> false
{_, :pos_inf} -> false
{m, n} when is_integer(m) and is_integer(n) -> m > n
_ -> false
end
if is_invalid, do: {:halt, :invalid}, else: {:cont, {min, max}}
end
defp safe_max(:neg_inf, x), do: x
defp safe_max(x, :neg_inf), do: x
defp safe_max(:pos_inf, _), do: :pos_inf
defp safe_max(_, :pos_inf), do: :pos_inf
defp safe_max(a, b), do: :erlang.max(a, b)
defp safe_min(:pos_inf, x), do: x
defp safe_min(x, :pos_inf), do: x
defp safe_min(:neg_inf, _), do: :neg_inf
defp safe_min(_, :neg_inf), do: :neg_inf
defp safe_min(a, b), do: :erlang.min(a, b)
end
defmodule Tdd.Algo do
@moduledoc "Implements the core, stateless algorithms for TDD manipulation."
use Tdd.Debug
alias Tdd.Store
alias Tdd.Consistency.Engine
# --- Binary Operation: Apply ---
@spec apply(atom, (atom, atom -> atom), non_neg_integer, non_neg_integer) :: non_neg_integer
def apply(op_name, op_lambda, u1_id, u2_id) do
# Memoization wrapper
cache_key = {:apply, op_name, Enum.sort([u1_id, u2_id])}
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
result_id = do_apply(op_name, op_lambda, u1_id, u2_id)
Store.put_op_cache(cache_key, result_id)
result_id
end
end
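# Sketch of a terminal-combining lambda for a union; the lambdas actually used
# by the compiler live elsewhere and may differ in detail. `id_a`/`id_b` stand
# for previously built TDD ids:
#
#     union_op = fn a, b ->
#       if a == :true_terminal or b == :true_terminal,
#         do: :true_terminal,
#         else: :false_terminal
#     end
#
#     Tdd.Algo.apply(:sum, union_op, id_a, id_b)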
defp do_apply(op_name, op_lambda, u1_id, u2_id) do
with {:ok, u1_details} <- Store.get_node(u1_id),
{:ok, u2_details} <- Store.get_node(u2_id) do
cond do
(u1_details == :true_terminal or u1_details == :false_terminal) and
(u2_details == :true_terminal or u2_details == :false_terminal) ->
if op_lambda.(u1_details, u2_details) == :true_terminal,
do: Store.true_node_id(),
else: Store.false_node_id()
u1_details == :true_terminal or u1_details == :false_terminal ->
{var2, y2, n2, d2} = u2_details
Store.find_or_create_node(
var2,
apply(op_name, op_lambda, u1_id, y2),
apply(op_name, op_lambda, u1_id, n2),
apply(op_name, op_lambda, u1_id, d2)
)
u2_details == :true_terminal or u2_details == :false_terminal ->
{var1, y1, n1, d1} = u1_details
Store.find_or_create_node(
var1,
apply(op_name, op_lambda, y1, u2_id),
apply(op_name, op_lambda, n1, u2_id),
apply(op_name, op_lambda, d1, u2_id)
)
true ->
{var1, y1, n1, d1} = u1_details
{var2, y2, n2, d2} = u2_details
top_var = Enum.min([var1, var2])
res_y =
apply(
op_name,
op_lambda,
if(var1 == top_var, do: y1, else: u1_id),
if(var2 == top_var, do: y2, else: u2_id)
)
res_n =
apply(
op_name,
op_lambda,
if(var1 == top_var, do: n1, else: u1_id),
if(var2 == top_var, do: n2, else: u2_id)
)
res_d =
apply(
op_name,
op_lambda,
if(var1 == top_var, do: d1, else: u1_id),
if(var2 == top_var, do: d2, else: u2_id)
)
Store.find_or_create_node(top_var, res_y, res_n, res_d)
end
end
end
# --- Unary Operation: Negation ---
@spec negate(non_neg_integer) :: non_neg_integer
def negate(tdd_id) do
cache_key = {:negate, tdd_id}
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
result_id =
case Store.get_node(tdd_id) do
{:ok, :true_terminal} ->
Store.false_node_id()
{:ok, :false_terminal} ->
Store.true_node_id()
{:ok, {var, y, n, d}} ->
Store.find_or_create_node(var, negate(y), negate(n), negate(d))
end
Store.put_op_cache(cache_key, result_id)
result_id
end
end
# --- Unary Operation: Semantic Simplification ---
@doc """
Simplifies a TDD under a set of external assumptions.
This is the main public entry point. It sets up the context for the
coinductive cycle detection.
"""
@spec simplify(non_neg_integer(), map()) :: non_neg_integer
def simplify(tdd_id, assumptions \\ %{}) do
# The main cache uses a sorted list of assumptions for a canonical key.
sorted_assumptions = Enum.sort(assumptions)
cache_key = {:simplify, tdd_id, sorted_assumptions}
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
# Start the recursive simplification with an empty context.
# The context is a MapSet to track visited (id, assumptions) pairs on the call stack.
result_id = do_simplify(tdd_id, sorted_assumptions, MapSet.new())
Store.put_op_cache(cache_key, result_id)
result_id
end
end
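# Usage sketch, where `tdd_id` stands for a previously compiled TDD:
#
#     Tdd.Algo.simplify(tdd_id, %{Tdd.Variable.v_is_atom() => true})
#     # returns the id of a TDD with branches contradicted by the assumptions pruned away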
# The private helper now takes a `context` to detect infinite recursion.
defp do_simplify(tdd_id, sorted_assumptions, context) do
current_state = {tdd_id, sorted_assumptions}
# Coinductive base case: if we have seen this exact state before in this
# call stack, we are in a loop.
if MapSet.member?(context, current_state) do
Store.true_node_id()
else
new_context = MapSet.put(context, current_state)
assumptions = Map.new(sorted_assumptions)
# If the assumptions themselves are contradictory, the result is `none`.
if Engine.check(assumptions) == :contradiction do
Store.false_node_id()
else
case Store.get_node(tdd_id) do
{:ok, :true_terminal} ->
Store.true_node_id()
{:ok, :false_terminal} ->
Store.false_node_id()
{:ok, {var, y, n, d}} ->
# Check if the variable's value is already explicitly known.
case Map.get(assumptions, var) do
true ->
do_simplify(y, sorted_assumptions, new_context)
false ->
do_simplify(n, sorted_assumptions, new_context)
:dc ->
do_simplify(d, sorted_assumptions, new_context)
# The variable's value is not explicitly known.
# Check if the context of assumptions *implies* its value.
nil ->
assumptions_imply_true =
Engine.check(Map.put(assumptions, var, false)) == :contradiction
assumptions_imply_false =
Engine.check(Map.put(assumptions, var, true)) == :contradiction
cond do
# This case should be caught by the top-level Engine.check, but is here for safety.
assumptions_imply_true and assumptions_imply_false ->
Store.false_node_id()
# --- THIS IS THE FIX ---
# If the context implies `var` must be true, we follow the 'yes' path.
# We do NOT add `{var, true}` to the assumptions for the recursive call,
# as that would cause the assumption set to grow infinitely and break
# the coinductive check. The original `assumptions` set already
# contains all the necessary information for the engine to work.
assumptions_imply_true ->
do_simplify(y, sorted_assumptions, new_context)
# Likewise, if the context implies `var` must be false, follow the 'no' path
# with the original, unmodified assumptions.
assumptions_imply_false ->
do_simplify(n, sorted_assumptions, new_context)
# The variable is truly independent. We must simplify all branches,
# adding the new assumption for each respective path, and reconstruct the node.
true ->
s_y = do_simplify(y, Enum.sort(Map.put(assumptions, var, true)), new_context)
s_n = do_simplify(n, Enum.sort(Map.put(assumptions, var, false)), new_context)
s_d = do_simplify(d, Enum.sort(Map.put(assumptions, var, :dc)), new_context)
Store.find_or_create_node(var, s_y, s_n, s_d)
end
end
end
end
end
end
@doc """
Recursively traverses a TDD graph from `root_id`, creating a new graph
where all references to `from_id` are replaced with `to_id`.
This is a pure function used to "tie the knot" in recursive type compilation.
"""
@spec substitute(
root_id :: non_neg_integer(),
from_id :: non_neg_integer(),
to_id :: non_neg_integer()
) ::
non_neg_integer()
def substitute(root_id, from_id, to_id) do
# Handle the trivial case where the root is the node to be replaced.
if root_id == from_id, do: to_id, else: do_substitute(root_id, from_id, to_id)
end
# The private helper uses memoization to avoid re-computing identical sub-graphs.
defp do_substitute(root_id, from_id, to_id) do
cache_key = {:substitute, root_id, from_id, to_id}
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
result_id =
case Store.get_node(root_id) do
# Terminal nodes are unaffected unless they are the target of substitution.
{:ok, :true_terminal} ->
Store.true_node_id()
{:ok, :false_terminal} ->
Store.false_node_id()
# For internal nodes, recursively substitute in all children.
{:ok, {var, y, n, d}} ->
new_y = substitute(y, from_id, to_id)
new_n = substitute(n, from_id, to_id)
new_d = substitute(d, from_id, to_id)
Store.find_or_create_node(var, new_y, new_n, new_d)
# This case should not be hit if the graph is well-formed.
{:error, reason} ->
raise "substitute encountered an error getting node #{root_id}: #{reason}"
end
Store.put_op_cache(cache_key, result_id)
result_id
end
end
# defp do_simplify(tdd_id, assumptions) do
# IO.inspect([tdd_id, assumptions], label: "do_simplify(tdd_id, assumptions)")
# # First, check if the current assumption set is already a contradiction.
# # If so, any TDD under these assumptions is empty (:none).
# if Engine.check(assumptions) == :contradiction do
# Store.false_node_id()
# else
# case Store.get_node(tdd_id) do
# {:ok, :true_terminal} ->
# Store.true_node_id()
#
# {:ok, :false_terminal} ->
# Store.false_node_id()
#
# {:ok, {var, y, n, d}} ->
# # Check if the variable's value is already known or implied by the assumptions.
# case Map.get(assumptions, var) do
# true ->
# simplify(y, assumptions)
#
# false ->
# simplify(n, assumptions)
#
# :dc ->
# simplify(d, assumptions)
#
# nil ->
# # The variable's value is not explicitly known.
# # We must ask the Consistency.Engine if the assumptions *imply* a value.
#
# # Does the context imply `var` must be true?
# # This is true if adding `var == false` creates a contradiction.
# assumptions_imply_true =
# Engine.check(Map.put(assumptions, var, false)) == :contradiction
#
# # Does the context imply `var` must be false?
# # This is true if adding `var == true` creates a contradiction.
# assumptions_imply_false =
# Engine.check(Map.put(assumptions, var, true)) == :contradiction
#
# cond do
# # This can happen if the base assumptions are contradictory,
# # but it's handled at the top of the function.
# assumptions_imply_true and assumptions_imply_false ->
# Store.false_node_id()
#
# # The context forces `var` to be true, so we only need to follow the 'yes' path.
# assumptions_imply_true ->
# simplify(y, assumptions)
#
# # The context forces `var` to be false, so we only need to follow the 'no' path.
# assumptions_imply_false ->
# simplify(n, assumptions)
#
# # The variable is truly independent of the current assumptions.
# # We must simplify all branches and reconstruct the node.
# true ->
# s_y = simplify(y, Map.put(assumptions, var, true))
# s_n = simplify(n, Map.put(assumptions, var, false))
# s_d = simplify(d, Map.put(assumptions, var, :dc))
# Store.find_or_create_node(var, s_y, s_n, s_d)
# end
# end
# end
# end
# end
end
defmodule Tdd.TypeReconstructor do
@moduledoc """
Reconstructs a high-level `TypeSpec` from a low-level assumption map.
This module performs the inverse operation of the TDD compiler. It takes a
set of predicate assumptions (e.g., from a path in a TDD) and synthesizes
the most specific `TypeSpec` that satisfies all of those assumptions.
"""
use Tdd.Debug
alias Tdd.TypeSpec
alias Tdd.Predicate.Info
alias Tdd.Variable
@doc """
Takes a map of `{variable, boolean}` assumptions and returns a `TypeSpec`.
"""
@spec spec_from_assumptions(map()) :: TypeSpec.t()
def spec_from_assumptions(assumptions) do
# 1. Partition assumptions into groups for the top-level entity and its sub-components.
partitions =
Enum.group_by(assumptions, fn {var, _val} ->
case Info.get_traits(var) do
# :head or :tail
%{type: :list_recursive, sub_key: key} -> key
# {:elem, index}
%{type: :tuple_recursive, sub_key: key} -> key
# All other predicates apply to the top-level entity
_ -> :top_level
end
end)
# 2. Reconstruct the spec for the top-level entity from its flat assumptions.
top_level_assumptions = Map.get(partitions, :top_level, []) |> Map.new()
top_level_spec = spec_from_flat_assumptions(top_level_assumptions)
# 3. Recursively reconstruct specs for all sub-problems (head, tail, elements).
sub_problem_specs =
partitions
|> Map.drop([:top_level])
|> Enum.map(fn {sub_key, sub_assumptions_list} ->
# Re-map the nested variables back to their base form for the recursive call.
# e.g., {{5, :c_head, NESTED_VAR, _}, val} -> {NESTED_VAR, val}
remapped_assumptions =
sub_assumptions_list
|> Map.new(fn {var, val} ->
# This pattern matching is a bit simplified for clarity
{_cat, _pred, nested_var_or_idx, maybe_nested_var} = var
nested_var =
if is_nil(maybe_nested_var), do: nested_var_or_idx, else: maybe_nested_var
{nested_var, val}
end)
# Recursively build the spec for the sub-problem
sub_spec = spec_from_assumptions(remapped_assumptions)
# Wrap it in a constructor that describes its relationship to the parent
case sub_key do
# Partial spec: just describes the head
:head ->
{:cons, sub_spec, :any}
# Partial spec: just describes the tail
:tail ->
{:cons, :any, sub_spec}
{:elem, index} ->
# Create a sparse tuple spec, e.g., {any, any, <sub_spec>, any}
# This is complex, a simpler approach is needed for now.
# For now, we'll just return a tuple spec that isn't fully specific.
# A full implementation would need to know the tuple's size.
# This is an oversimplification but works for demo
{:tuple, [sub_spec]}
end
end)
# 4. The final spec is the intersection of the top-level spec and all sub-problem specs.
final_spec_list = [top_level_spec | sub_problem_specs]
TypeSpec.normalize({:intersect, final_spec_list})
end
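# Sketch: a single flat assumption reconstructs the corresponding base type.
#
#     spec_from_assumptions(%{Variable.v_is_integer() => true})
#     # => :integer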
@doc "Handles only the 'flat' (non-recursive) assumptions for a single entity."
defp spec_from_flat_assumptions(assumptions) do
specs =
Enum.map(assumptions, fn {var, bool_val} ->
# Convert each assumption into a `TypeSpec`.
# A `true` assumption means the type is `X`.
# A `false` assumption means the type is `¬X`.
spec =
case var do
{0, :is_atom, _, _} -> :atom
{0, :is_integer, _, _} -> :integer
{0, :is_list, _, _} -> :list
{0, :is_tuple, _, _} -> :tuple
{1, :value, val, _} -> {:literal, val}
# For integer properties, we create a range spec. This part could be more detailed.
# x < n
{2, :alt, n, _} -> {:integer_range, :neg_inf, n - 1}
{2, :beq, n, _} -> {:literal, n}
# x > n
{2, :cgt, n, _} -> {:integer_range, n + 1, :pos_inf}
# Simplified for now
{4, :a_size, _, _} -> :tuple
{5, :b_is_empty, _, _} -> {:literal, []}
# --- THIS IS THE FIX ---
# Correctly reconstruct a list_of spec from the ambient predicate.
# {5, :a_all_elements, element_spec, _} ->
# {:list_of, element_spec}
# Ignore recursive and ambient vars at this flat level
_ -> :any
end
if bool_val, do: spec, else: {:negation, spec}
end)
# The result is the intersection of all the individual specs.
TypeSpec.normalize({:intersect, specs})
end
end
defmodule Tdd.Compiler do
use Tdd.Debug
@moduledoc "Compiles a `TypeSpec` into a canonical TDD ID."
alias Tdd.TypeSpec
alias Tdd.Variable
alias Tdd.Store
alias Tdd.Algo
@doc """
The main public entry point. Takes a spec and returns its TDD ID.
This now delegates to a private function with a context for recursion.
"""
@spec spec_to_id(TypeSpec.t()) :: non_neg_integer()
def spec_to_id(spec) do
# Start with an empty context map.
spec_to_id(spec, %{})
end
# This is the new core compilation function with a context map.
# The context tracks `{spec => placeholder_id}` for in-progress compilations.
defp spec_to_id(spec, context) do
normalized_spec = TypeSpec.normalize(spec)
cache_key = {:spec_to_id, normalized_spec}
case Store.get_op_cache(cache_key) do
{:ok, id} ->
id
:not_found ->
# Check if we are in a recursive call for a spec we're already compiling.
case Map.get(context, normalized_spec) do
placeholder_id when is_integer(placeholder_id) ->
placeholder_id
nil ->
# This is a new spec. Decide which compilation path to take.
if is_recursive_spec?(normalized_spec) do
# Use the full knot-tying logic only for recursive types.
compile_recursive_spec(normalized_spec, context)
else
# Use the simple, direct path for all other types.
compile_non_recursive_spec(normalized_spec, context)
end
end
end
end
defp is_recursive_spec?({:list_of, _a}), do: true
defp is_recursive_spec?(_a), do: false
# NEW: The logic for simple, non-recursive types.
defp compile_non_recursive_spec(spec, context) do
# Just compile the body directly. Pass context in case it contains recursive children.
raw_id = do_spec_to_id(spec, context)
final_id = Algo.simplify(raw_id)
# Cache and return.
Store.put_op_cache({:spec_to_id, spec}, final_id)
final_id
end
# NEW: The full knot-tying logic, now isolated.
defp compile_recursive_spec(spec, context) do
# 1. Create a placeholder and update the context.
placeholder_id = Store.create_placeholder(spec)
new_context = Map.put(context, spec, placeholder_id)
# 2. Compile the body. It will be built with pointers to the placeholder.
body_id = do_spec_to_id(spec, new_context)
# 3. THIS IS THE CRUCIAL FIX:
# Instead of mutating the store, we create a NEW graph where all
# internal pointers to `placeholder_id` are replaced with `body_id`.
# This makes the graph point to its own root, creating the cycle.
final_id = Algo.substitute(body_id, placeholder_id, body_id)
# 4. Simplify the resulting (now correctly cyclic) graph.
simplified_id = Algo.simplify(final_id)
# 5. Cache the result and return it.
Store.put_op_cache({:spec_to_id, spec}, simplified_id)
simplified_id
end
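# End-to-end usage sketch, assuming the store has been initialised in the
# current process:
#
#     Tdd.Store.init()
#     id = Tdd.Compiler.spec_to_id({:list_of, :integer})
#     Tdd.Debug.print_tdd_graph(id)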
# This helper does the raw, structural compilation.
# It now takes and passes down the context.
defp do_spec_to_id(spec, context) do
case spec do
# Pass context on all recursive calls to spec_to_id/2
:any ->
Store.true_node_id()
:none ->
Store.false_node_id()
:atom ->
create_base_type_tdd(Variable.v_is_atom())
:integer ->
create_base_type_tdd(Variable.v_is_integer())
:list ->
create_base_type_tdd(Variable.v_is_list())
:tuple ->
create_base_type_tdd(Variable.v_is_tuple())
{:literal, val} when is_atom(val) ->
compile_value_equality(:atom, Variable.v_atom_eq(val), context)
{:literal, val} when is_integer(val) ->
compile_value_equality(:integer, Variable.v_int_eq(val), context)
{:literal, []} ->
compile_value_equality(:list, Variable.v_list_is_empty(), context)
{:integer_range, min, max} ->
compile_integer_range(min, max, context)
{:union, specs} ->
Enum.map(specs, &spec_to_id(&1, context))
|> Enum.reduce(Store.false_node_id(), fn id, acc ->
Algo.apply(:sum, &op_union_terminals/2, id, acc)
end)
{:intersect, specs} ->
Enum.map(specs, &spec_to_id(&1, context))
|> Enum.reduce(Store.true_node_id(), fn id, acc ->
Algo.apply(:intersect, &op_intersect_terminals/2, id, acc)
end)
{:negation, sub_spec} ->
Algo.negate(spec_to_id(sub_spec, context))
{:cons, head_spec, tail_spec} ->
id_head = spec_to_id(head_spec, context)
id_tail = spec_to_id(tail_spec, context)
compile_cons_from_ids(id_head, id_tail, context)
{:tuple, elements} ->
compile_tuple(elements, context)
# --- REMOVE THE SPECIAL CASE ---
# The main `spec_to_id/2` logic now handles ALL recursive types.
{:list_of, element_spec} ->
# We need to compile the logical definition: [] | cons(E, list_of(E))
# The TypeSpec normalizer doesn't do this expansion, so we do it here.
recursive_def = {:union, [{:literal, []}, {:cons, element_spec, spec}]}
# Now compile this definition. The main `spec_to_id/2` will handle the knot-tying.
spec_to_id(recursive_def, context)
_ ->
raise "Tdd.Compiler: Cannot compile unknown spec: #{inspect(spec)}"
end
end
# --- Private Helpers ---
defp create_base_type_tdd(var),
do:
Store.find_or_create_node(
var,
Store.true_node_id(),
Store.false_node_id(),
Store.false_node_id()
)
defp compile_value_equality(base_type_spec, value_var, context) do
eq_node = create_base_type_tdd(value_var)
base_node_id = spec_to_id(base_type_spec, context)
Algo.apply(:intersect, &op_intersect_terminals/2, base_node_id, eq_node)
end
defp compile_integer_range(min, max, context) do
base_id = spec_to_id(:integer, context)
lt_min_tdd = if min != :neg_inf, do: create_base_type_tdd(Variable.v_int_lt(min))
gte_min_tdd = if lt_min_tdd, do: Algo.negate(lt_min_tdd), else: spec_to_id(:any)
id_with_min = Algo.apply(:intersect, &op_intersect_terminals/2, base_id, gte_min_tdd)
if max == :pos_inf do
id_with_min
else
lt_max_plus_1_tdd = create_base_type_tdd(Variable.v_int_lt(max + 1))
Algo.apply(:intersect, &op_intersect_terminals/2, id_with_min, lt_max_plus_1_tdd)
end
end
# Pass context
defp compile_cons_from_ids(h_id, t_id, context) do
# Build `list & !is_empty` manually and safely.
id_list = create_base_type_tdd(Variable.v_is_list())
id_is_empty = create_base_type_tdd(Variable.v_list_is_empty())
id_not_is_empty = Algo.negate(id_is_empty)
non_empty_list_id =
Algo.apply(:intersect, &op_intersect_terminals/2, id_list, id_not_is_empty)
# Pass context
head_checker = sub_problem(&Variable.v_list_head_pred/1, h_id, context)
# Pass context
tail_checker = sub_problem(&Variable.v_list_tail_pred/1, t_id, context)
[non_empty_list_id, head_checker, tail_checker]
|> Enum.reduce(Store.true_node_id(), fn id, acc ->
Algo.apply(:intersect, &op_intersect_terminals/2, id, acc)
end)
end
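  # Illustration: a spec such as {:cons, :atom, :list} arrives here as two
  # already-compiled ids and is built (roughly) as
  #   list AND NOT(is_empty) AND head-satisfies(atom) AND tail-satisfies(list)
  # where the head/tail checks come from relabelling the element TDDs via
  # `sub_problem/3` below.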
defp compile_tuple(elements, context) do
size = length(elements)
base_id = spec_to_id(:tuple, context)
size_tdd = create_base_type_tdd(Variable.v_tuple_size_eq(size))
initial_id = Algo.apply(:intersect, &op_intersect_terminals/2, base_id, size_tdd)
elements
|> Enum.with_index()
|> Enum.reduce(initial_id, fn {elem_spec, index}, acc_id ->
      elem_id = spec_to_id(elem_spec, context)
elem_key_constructor = &Variable.v_tuple_elem_pred(index, &1)
# Pass context
elem_checker = sub_problem(elem_key_constructor, elem_id, context)
Algo.apply(:intersect, &op_intersect_terminals/2, acc_id, elem_checker)
end)
end
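  # Illustration: {:tuple, [:atom, :integer]} is built (roughly) as
  #   tuple AND tuple_size == 2 AND elem(0)-satisfies(atom) AND elem(1)-satisfies(integer)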
defp do_sub_problem(sub_key_constructor, tdd_id, context) do
    # Rebuilds the TDD rooted at `tdd_id` with every variable wrapped by
    # `sub_key_constructor`, turning a check on a value into a check on one of
    # its components (list head/tail or a tuple element). A `cond` makes the
    # invalid-id guard explicit before the node lookup.
cond do
# Guard against invalid IDs from placeholder children.
tdd_id < 0 ->
Store.false_node_id()
# If it's a valid ID, proceed with the main logic.
true ->
case Store.get_node(tdd_id) do
{:ok, :true_terminal} ->
Store.true_node_id()
{:ok, :false_terminal} ->
Store.false_node_id()
# Handle placeholders by operating on their spec, not their TDD structure.
{:ok, {{:placeholder, spec}, _, _, _}} ->
dummy_var = sub_key_constructor.(:dummy)
case dummy_var do
{5, :c_head, :dummy, _} ->
{:list_of, element_spec} = spec
spec_to_id(element_spec, context)
{5, :d_tail, :dummy, _} ->
tdd_id
{4, :b_element, index, :dummy} ->
{:tuple, elements} = spec
spec_to_id(Enum.at(elements, index), context)
_ ->
raise "sub_problem encountered an unhandled recursive predicate on a placeholder: #{inspect(dummy_var)}"
end
# The normal, non-placeholder case
{:ok, {var, y, n, d}} ->
Store.find_or_create_node(
sub_key_constructor.(var),
sub_problem(sub_key_constructor, y, context),
sub_problem(sub_key_constructor, n, context),
sub_problem(sub_key_constructor, d, context)
)
# This case should now be unreachable.
{:error, :not_found} ->
raise "sub_problem received an unknown tdd_id: #{tdd_id}"
end
end
end
defp sub_problem(sub_key_constructor, tdd_id, context) do
cache_key = {:sub_problem, sub_key_constructor, tdd_id}
# Note: context is not part of the cache key. This is a simplification that
# assumes the result of a sub_problem is independent of the wider compilation
# context, which is true for our use case.
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
result_id = do_sub_problem(sub_key_constructor, tdd_id, context)
Store.put_op_cache(cache_key, result_id)
result_id
end
end
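  # Illustration (a sketch; `id_atom` here stands for a TDD compiled from :atom):
  #   sub_problem(&Variable.v_list_head_pred/1, id_atom, context)
  # rebuilds that graph with every variable wrapped as v_list_head_pred(var),
  # which reads as "the head of the list is an atom".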
# defp compile_list_of(element_spec) do
# norm_elem_spec = TypeSpec.normalize(element_spec)
# cache_key = {:compile_list_of, norm_elem_spec}
# case Store.get_op_cache(cache_key) do
# {:ok, id} -> id
# :not_found ->
# id = do_compile_list_of(norm_elem_spec)
# Store.put_op_cache(cache_key, id)
# id
# end
# end
#
# defp do_compile_list_of(element_spec) do
# id_elem = spec_to_id(element_spec)
# id_empty_list = spec_to_id({:literal, []})
# step_function = fn id_x ->
# id_cons = compile_cons_from_ids(id_elem, id_x)
# Algo.apply(:sum, &op_union_terminals/2, id_empty_list, id_cons)
# end
# loop_until_stable(Store.false_node_id(), step_function)
# end
  # Fixed-point loop: apply `step_function` repeatedly until the simplified TDD id stops changing.
defp loop_until_stable(prev_id, step_function, iteration \\ 0) do
IO.puts("\n--- Fixed-Point Iteration: #{iteration} ---")
IO.inspect(prev_id, label: "prev_id")
# Tdd.Debug.print(prev_id) # Optional: uncomment for full graph
raw_next_id = step_function.(prev_id)
IO.inspect(raw_next_id, label: "raw_next_id (after step_function)")
# Let's see the raw graph
Tdd.Debug.print(raw_next_id)
# Crucially, simplify after each step for canonical comparison.
next_id = Algo.simplify(raw_next_id)
IO.inspect(next_id, label: "next_id (after simplify)")
# Let's see the simplified graph
Tdd.Debug.print(next_id)
if next_id == prev_id do
IO.puts("--- Fixed-Point Reached! ---")
next_id
else
      # Safety break while debugging: bail out if the loop fails to converge quickly.
      if iteration > 2 do
        IO.inspect(Process.info(self(), :current_stacktrace))
        raise "Fixed-point iteration did not converge after #{iteration + 1} iterations. Halting."
end
loop_until_stable(next_id, step_function, iteration + 1)
end
end
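  # Usage sketch (mirrors the commented-out do_compile_list_of/1 above; `id_elem`,
  # `id_empty_list` and `context` are assumed to be bound by the caller):
  #   loop_until_stable(Store.false_node_id(), fn id_x ->
  #     id_cons = compile_cons_from_ids(id_elem, id_x, context)
  #     Algo.apply(:sum, &op_union_terminals/2, id_empty_list, id_cons)
  #   end)
  # The loop seeds with the empty type and re-applies the step until the
  # simplified TDD id stops changing.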
def is_subtype(spec1, spec2) do
id1 = spec_to_id(spec1)
id2 = spec_to_id(spec2)
# The subtyping check is: `A <: B` if and only if `A & ~B` is empty (`:none`).
neg_id2 = Algo.negate(id2)
op_intersect = fn
:false_terminal, _ -> :false_terminal
_, :false_terminal -> :false_terminal
t, :true_terminal -> t
:true_terminal, t -> t
# Default case for non-terminal nodes, though apply handles recursion
_t1, _t2 -> :non_terminal
end
intersect_id = Algo.apply(:intersect, op_intersect, id1, neg_id2)
final_id = Algo.simplify(intersect_id)
final_id == Store.false_node_id()
end
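  # Illustrative usage (assumes Tdd.Store.init/0 has been called, as the test
  # runners below do):
  #   is_subtype({:literal, :foo}, :atom)   #=> true
  #   is_subtype(:atom, {:literal, :foo})   #=> false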
# --- Private Functions for Terminal Logic ---
defp op_union_terminals(:true_terminal, _), do: :true_terminal
defp op_union_terminals(_, :true_terminal), do: :true_terminal
defp op_union_terminals(t, :false_terminal), do: t
defp op_union_terminals(:false_terminal, t), do: t
defp op_intersect_terminals(:false_terminal, _), do: :false_terminal
defp op_intersect_terminals(_, :false_terminal), do: :false_terminal
defp op_intersect_terminals(t, :true_terminal), do: t
defp op_intersect_terminals(:true_terminal, t), do: t
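  # These encode the usual boolean algebra on terminal nodes, e.g.
  #   op_union_terminals(:false_terminal, :true_terminal)     #=> :true_terminal
  #   op_intersect_terminals(:false_terminal, :true_terminal) #=> :false_terminal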
end
####
# Test suites
####
defmodule TddStoreTests do
def test(name, expected, result) do
if expected == result do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
def run() do
IO.puts("\n--- Running Tdd.Store Tests ---")
Process.put(:test_failures, [])
# --- Test Setup ---
Tdd.Store.init()
# --- Test Cases ---
IO.puts("\n--- Section: Initialization and Terminals ---")
test("true_node_id returns 1", 1, Tdd.Store.true_node_id())
test("false_node_id returns 0", 0, Tdd.Store.false_node_id())
test("get_node for ID 1 returns true_terminal", {:ok, :true_terminal}, Tdd.Store.get_node(1))
test(
"get_node for ID 0 returns false_terminal",
{:ok, :false_terminal},
Tdd.Store.get_node(0)
)
test(
"get_node for unknown ID returns not_found",
{:error, :not_found},
Tdd.Store.get_node(99)
)
IO.puts("\n--- Section: Node Creation and Structural Sharing ---")
# Define some opaque variables
var_a = {:is_atom}
var_b = {:is_integer}
true_id = Tdd.Store.true_node_id()
false_id = Tdd.Store.false_node_id()
# Create a new node. It should get ID 2.
id1 = Tdd.Store.find_or_create_node(var_a, true_id, false_id, false_id)
test("First created node gets ID 2", 2, id1)
# Verify its content
test(
"get_node for ID 2 returns the correct tuple",
{:ok, {var_a, true_id, false_id, false_id}},
Tdd.Store.get_node(id1)
)
# Create another, different node. It should get ID 3.
id2 = Tdd.Store.find_or_create_node(var_b, id1, false_id, false_id)
test("Second created node gets ID 3", 3, id2)
# Attempt to create the first node again.
id1_again = Tdd.Store.find_or_create_node(var_a, true_id, false_id, false_id)
test(
"Attempting to create an existing node returns the same ID (Structural Sharing)",
id1,
id1_again
)
# Check that next_id was not incremented by the shared call
id3 = Tdd.Store.find_or_create_node(var_b, true_id, false_id, false_id)
test("Next new node gets the correct ID (4)", 4, id3)
IO.puts("\n--- Section: Basic Reduction Rule ---")
# Create a node where all children are the same.
id_redundant = Tdd.Store.find_or_create_node(var_a, id3, id3, id3)
test(
"A node with identical children reduces to the child's ID",
id3,
id_redundant
)
IO.puts("\n--- Section: Caching ---")
cache_key = {:my_op, 1, 2}
test("Cache is initially empty for a key", :not_found, Tdd.Store.get_op_cache(cache_key))
Tdd.Store.put_op_cache(cache_key, :my_result)
test(
"Cache returns the stored value after put",
{:ok, :my_result},
Tdd.Store.get_op_cache(cache_key)
)
Tdd.Store.put_op_cache(cache_key, :new_result)
test("Cache can be updated", {:ok, :new_result}, Tdd.Store.get_op_cache(cache_key))
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.Store tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule TypeSpecTests do
alias Tdd.TypeSpec
# Simple test helper function
defp test(name, expected, tested) do
current_failures = Process.get(:test_failures, [])
result = TypeSpec.normalize(tested)
# Use a custom comparison to handle potentially unsorted lists in expected values
# The normalize function *should* sort, but this makes tests more robust.
is_equal =
case {expected, result} do
{{:union, list1}, {:union, list2}} -> Enum.sort(list1) == Enum.sort(list2)
{{:intersect, list1}, {:intersect, list2}} -> Enum.sort(list1) == Enum.sort(list2)
_ -> expected == result
end
if is_equal do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" tested: #{inspect(tested)}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | current_failures])
end
end
def run() do
IO.puts("\n--- Running Tdd.TypeSpec.normalize/1 Tests ---")
Process.put(:test_failures, [])
# --- Test Section: Base & Simple Types ---
IO.puts("\n--- Section: Base & Simple Types ---")
test("Normalizing :any is idempotent", :any, :any)
test("Normalizing :none is idempotent", :none, :none)
test("Normalizing :atom is idempotent", :atom, :atom)
test("Normalizing a literal is idempotent", {:literal, :foo}, {:literal, :foo})
# --- Test Section: Double Negation ---
IO.puts("\n--- Section: Double Negation ---")
test("¬(¬atom) simplifies to atom", :atom, {:negation, {:negation, :atom}})
test("A single negation is preserved", {:negation, :integer}, {:negation, :integer})
test(
"¬(¬(¬atom)) simplifies to ¬atom",
{:negation, :atom},
{:negation, {:negation, {:negation, :atom}}}
)
# --- Test Section: Union Normalization ---
IO.puts("\n--- Section: Union Normalization ---")
test(
"Flattens nested unions",
{:union, [:atom, :integer, :list]},
{:union, [:integer, {:union, [:list, :atom]}]}
)
test(
"Sorts members of a union",
{:union, [:atom, :integer, :list]},
{:union, [:list, :integer, :atom]}
)
test(
"Removes duplicates in a union",
{:union, [:atom, :integer]},
{:union, [:integer, :atom, :integer]}
)
test("Simplifies a union with :none (A | none -> A)", :atom, {:union, [:atom, :none]})
test("Simplifies a union with :any (A | any -> any)", :any, {:union, [:atom, :any]})
test("An empty union simplifies to :none", :none, {:union, []})
test("A union containing only :none simplifies to :none", :none, {:union, [:none, :none]})
test("A union of a single element simplifies to the element itself", :atom, {:union, [:atom]})
# --- Test Section: Intersection Normalization ---
IO.puts("\n--- Section: Intersection Normalization ---")
test(
"Flattens nested intersections",
{:intersect, [:atom, :integer, :list]},
{:intersect, [:integer, {:intersect, [:list, :atom]}]}
)
test(
"Sorts members of an intersection",
{:intersect, [:atom, :integer, :list]},
{:intersect, [:list, :integer, :atom]}
)
test(
"Removes duplicates in an intersection",
{:intersect, [:atom, :integer]},
{:intersect, [:integer, :atom, :integer]}
)
test(
"Simplifies an intersection with :any (A & any -> A)",
:atom,
{:intersect, [:atom, :any]}
)
test(
"Simplifies an intersection with :none (A & none -> none)",
:none,
{:intersect, [:atom, :none]}
)
test("An empty intersection simplifies to :any", :any, {:intersect, []})
test(
"An intersection of a single element simplifies to the element itself",
:atom,
{:intersect, [:atom]}
)
# --- Test Section: Recursive Normalization ---
IO.puts("\n--- Section: Recursive Normalization ---")
test(
"Recursively normalizes elements in a tuple",
{:tuple, [:atom, {:union, [{:literal, :a}, {:literal, :b}]}]},
{:tuple, [{:union, [:atom]}, {:union, [{:literal, :a}, {:literal, :b}]}]}
)
test(
"Recursively normalizes head and tail in a cons",
{:cons, :any, {:negation, :integer}},
{:cons, {:union, [:atom, :any]}, {:negation, {:union, [:integer]}}}
)
test(
"Recursively normalizes element in list_of",
{:list_of, :list},
{:list_of, {:intersect, [:any, :list]}}
)
test(
"Recursively normalizes sub-spec in negation",
{:negation, {:union, [{:literal, :a}, {:literal, :b}]}},
{:negation, {:union, [{:literal, :a}, {:literal, :b}]}}
)
# --- Test Section: Complex Nested Cases ---
IO.puts("\n--- Section: Complex Nested Cases ---")
complex_spec =
{:union,
[
:atom,
# simplifies to :integer
{:intersect, [:any, :integer, {:intersect, [:integer]}]},
# simplifies to :list
{:union, [:none, :list]}
]}
test(
"Handles complex nested simplifications correctly",
{:union, [:atom, :integer, :list]},
complex_spec
)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All TypeSpec tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures:")
Enum.each(failures, &IO.puts(" - #{&1}"))
end
end
end
defmodule TddVariableTests do
alias Tdd.Variable
alias Tdd.TypeSpec
# Simple test helper function
defp test(name, expected, result) do
current_failures = Process.get(:test_failures, [])
if expected == result do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | current_failures])
end
end
def run() do
IO.puts("\n--- Running Tdd.Variable Tests ---")
Process.put(:test_failures, [])
# --- Test Section: Variable Structure ---
IO.puts("\n--- Section: Variable Structure ---")
test("v_is_atom returns correct tuple", {0, :is_atom, nil, nil}, Variable.v_is_atom())
test("v_atom_eq returns correct tuple", {1, :value, :foo, nil}, Variable.v_atom_eq(:foo))
test("v_int_lt returns correct tuple", {2, :alt, 10, nil}, Variable.v_int_lt(10))
test(
"v_tuple_size_eq returns correct tuple",
{4, :a_size, 2, nil},
Variable.v_tuple_size_eq(2)
)
test(
"v_tuple_elem_pred nests a variable correctly",
{4, :b_element, 0, {0, :is_integer, nil, nil}},
Variable.v_tuple_elem_pred(0, Variable.v_is_integer())
)
test(
"v_list_is_empty returns correct tuple",
{5, :b_is_empty, nil, nil},
Variable.v_list_is_empty()
)
test(
"v_list_head_pred nests a variable correctly",
{5, :c_head, {0, :is_atom, nil, nil}, nil},
Variable.v_list_head_pred(Variable.v_is_atom())
)
# test(
# "v_list_all_elements_are nests a TypeSpec correctly",
# {5, :a_all_elements, {:union, [:atom, :integer]}, nil},
# Variable.v_list_all_elements_are(TypeSpec.normalize({:union, [:integer, :atom]}))
# )
# --- Test Section: Global Ordering ---
IO.puts("\n--- Section: Global Ordering (Based on Elixir Term Comparison) ---")
# Category 0 < Category 1
test(
"Primary type var < Atom property var",
true,
Variable.v_is_tuple() < Variable.v_atom_eq(:anything)
)
# Within Category 2: alt < beq < cgt
test(
"Integer :lt var < Integer :eq var",
true,
Variable.v_int_lt(10) < Variable.v_int_eq(10)
)
test(
"Integer :eq var < Integer :gt var",
true,
Variable.v_int_eq(10) < Variable.v_int_gt(10)
)
# Within Category 2: comparison of value
test(
"Integer :eq(5) var < Integer :eq(10) var",
true,
Variable.v_int_eq(5) < Variable.v_int_eq(10)
)
# Within Category 4: comparison of index
test(
"Tuple elem(0) var < Tuple elem(1) var",
true,
Variable.v_tuple_elem_pred(0, Variable.v_is_atom()) <
Variable.v_tuple_elem_pred(1, Variable.v_is_atom())
)
# Within Category 4, same index: comparison of nested var
test(
"Tuple elem(0, atom) var < Tuple elem(0, int) var",
true,
Variable.v_tuple_elem_pred(0, Variable.v_is_atom()) <
Variable.v_tuple_elem_pred(0, Variable.v_is_integer())
)
# IO.inspect(Variable.v_list_all_elements_are(:atom),
# label: "Variable.v_list_all_elements_are(:atom)"
# )
IO.inspect(Variable.v_list_is_empty(), label: "Variable.v_list_is_empty()")
# test(
# "List :a_all_elements var < List :b_is_empty var",
# true,
# Variable.v_list_all_elements_are(:atom) < Variable.v_list_is_empty()
# )
test(
"List :b_is_empty var < List :c_head var",
true,
Variable.v_list_is_empty() < Variable.v_list_head_pred(Variable.v_is_atom())
)
test(
"List :c_head var < List :tail var",
true,
Variable.v_list_head_pred(Variable.v_is_atom()) <
Variable.v_list_tail_pred(Variable.v_is_atom())
)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.Variable tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule ConsistencyEngineTests do
alias Tdd.Consistency.Engine
alias Tdd.Variable
defp test(name, expected, assumptions_map) do
result = Engine.check(assumptions_map)
is_ok = expected == result
status = if is_ok, do: "[PASS]", else: "[FAIL]"
IO.puts("#{status} #{name}")
unless is_ok do
IO.puts(" Expected: #{inspect(expected)}, Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
def run() do
IO.puts("\n--- Running Tdd.Consistency.Engine Tests ---")
Process.put(:test_failures, [])
# --- Section: Basic & Implication Tests ---
IO.puts("\n--- Section: Basic & Implication Tests ---")
test("An empty assumption map is consistent", :consistent, %{})
test("A single valid assumption is consistent", :consistent, %{Variable.v_is_atom() => true})
test(
"An implied contradiction is caught by expander",
:contradiction,
%{Variable.v_atom_eq(:foo) => true, Variable.v_is_atom() => false}
)
test(
"Implication creates a consistent set",
:consistent,
# implies is_atom=true
%{Variable.v_atom_eq(:foo) => true}
)
# --- Section: Primary Type Exclusivity ---
IO.puts("\n--- Section: Primary Type Exclusivity ---")
test(
"Two primary types cannot both be true",
:contradiction,
%{Variable.v_is_atom() => true, Variable.v_is_integer() => true}
)
test(
"Two primary types implied to be true is a contradiction",
:contradiction,
%{Variable.v_atom_eq(:foo) => true, Variable.v_int_eq(5) => true}
)
test(
"One primary type true and another false is consistent",
:consistent,
%{Variable.v_is_atom() => true, Variable.v_is_integer() => false}
)
# --- Section: Atom Consistency ---
IO.puts("\n--- Section: Atom Consistency ---")
test(
"An atom cannot equal two different values",
:contradiction,
%{Variable.v_atom_eq(:foo) => true, Variable.v_atom_eq(:bar) => true}
)
test(
"An atom can equal one value",
:consistent,
%{Variable.v_atom_eq(:foo) => true}
)
# --- Section: List Flat Consistency ---
IO.puts("\n--- Section: List Flat Consistency ---")
test(
"A list cannot be empty and have a head property",
:contradiction,
%{
Variable.v_list_is_empty() => true,
Variable.v_list_head_pred(Variable.v_is_atom()) => true
}
)
test(
"A non-empty list can have a head property",
:consistent,
%{
Variable.v_list_is_empty() => false,
Variable.v_list_head_pred(Variable.v_is_atom()) => true
}
)
test(
"A non-empty list is implied by head property",
:consistent,
# implies is_empty=false
%{Variable.v_list_head_pred(Variable.v_is_atom()) => true}
)
# --- Section: Integer Consistency ---
IO.puts("\n--- Section: Integer Consistency ---")
test("int == 5 is consistent", :consistent, %{Variable.v_int_eq(5) => true})
test("int == 5 AND int == 10 is a contradiction", :contradiction, %{
Variable.v_int_eq(5) => true,
Variable.v_int_eq(10) => true
})
test("int < 10 AND int > 20 is a contradiction", :contradiction, %{
Variable.v_int_lt(10) => true,
Variable.v_int_gt(20) => true
})
test("int > 5 AND int < 4 is a contradiction", :contradiction, %{
Variable.v_int_gt(5) => true,
Variable.v_int_lt(4) => true
})
    # the only integer satisfying both constraints is 6
test("int > 5 AND int < 7 is consistent", :consistent, %{
Variable.v_int_gt(5) => true,
Variable.v_int_lt(7) => true
})
test("int == 5 AND int < 3 is a contradiction", :contradiction, %{
Variable.v_int_eq(5) => true,
Variable.v_int_lt(3) => true
})
test("int == 5 AND int > 10 is a contradiction", :contradiction, %{
Variable.v_int_eq(5) => true,
Variable.v_int_gt(10) => true
})
test("int == 5 AND int > 3 is consistent", :consistent, %{
Variable.v_int_eq(5) => true,
Variable.v_int_gt(3) => true
})
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Consistency.Engine tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule TddAlgoTests do
alias Tdd.Store
alias Tdd.Variable
alias Tdd.Algo
# We need this to create stable variables
alias Tdd.TypeSpec
# --- Test Helper ---
defp test(name, expected, result) do
# A simple equality test is sufficient here.
if expected == result do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
# Helper to pretty print a TDD for debugging
defp print_tdd(id, indent \\ 0) do
prefix = String.duplicate(" ", indent)
case Store.get_node(id) do
{:ok, details} ->
IO.puts("#{prefix}ID #{id}: #{inspect(details)}")
case details do
{_var, y, n, d} ->
IO.puts("#{prefix} Yes ->")
print_tdd(y, indent + 1)
IO.puts("#{prefix} No ->")
print_tdd(n, indent + 1)
IO.puts("#{prefix} DC ->")
print_tdd(d, indent + 1)
_ ->
:ok
end
{:error, reason} ->
IO.puts("#{prefix}ID #{id}: Error - #{reason}")
end
end
# --- Test Runner ---
def run() do
IO.puts("\n--- Running Tdd.Algo & Tdd.Consistency.Engine Tests ---")
Process.put(:test_failures, [])
# Setup: Initialize the store and define some basic TDDs using the new modules.
Store.init()
true_id = Store.true_node_id()
false_id = Store.false_node_id()
# --- Manually build some basic type TDDs for testing ---
# t_atom = if is_atom then true else false
t_atom = Store.find_or_create_node(Variable.v_is_atom(), true_id, false_id, false_id)
# t_int = if is_int then true else false
t_int = Store.find_or_create_node(Variable.v_is_integer(), true_id, false_id, false_id)
# t_foo = if is_atom then (if value == :foo then true else false) else false
foo_val_check =
Store.find_or_create_node(Variable.v_atom_eq(:foo), true_id, false_id, false_id)
t_foo = Store.find_or_create_node(Variable.v_is_atom(), foo_val_check, false_id, false_id)
# t_bar = if is_atom then (if value == :bar then true else false) else false
bar_val_check =
Store.find_or_create_node(Variable.v_atom_eq(:bar), true_id, false_id, false_id)
t_bar = Store.find_or_create_node(Variable.v_is_atom(), bar_val_check, false_id, false_id)
# --- Section: Negate Algorithm ---
IO.puts("\n--- Section: Algo.negate ---")
negated_true = Algo.negate(true_id)
test("negate(true) is false", false_id, negated_true)
negated_false = Algo.negate(false_id)
test("negate(false) is true", true_id, negated_false)
# Double negation should be identity
test("negate(negate(t_atom)) is t_atom", t_atom, Algo.negate(Algo.negate(t_atom)))
# --- Section: Apply Algorithm (Union & Intersection) ---
IO.puts("\n--- Section: Algo.apply (raw structural operations) ---")
op_sum = fn
:true_terminal, _ -> :true_terminal
_, :true_terminal -> :true_terminal
t, :false_terminal -> t
:false_terminal, t -> t
end
op_intersect = fn
:false_terminal, _ -> :false_terminal
_, :false_terminal -> :false_terminal
t, :true_terminal -> t
:true_terminal, t -> t
end
# atom | int
sum_atom_int = Algo.apply(:sum, op_sum, t_atom, t_int)
# The result should be a node that checks is_atom, then if false, checks is_int
# We expect a structure like: if is_atom -> true, else -> t_int
    expected_sum_structure_id =
      Store.find_or_create_node(Variable.v_is_atom(), true_id, t_int, t_int)
test("Structure of 'atom | int' is correct", expected_sum_structure_id, sum_atom_int)
# :foo & :bar (structurally, before simplification)
# It should build a tree that checks is_atom, then value==:foo, then value==:bar
# This will be complex, but the key is that it's NOT the false_id yet.
intersect_foo_bar_raw = Algo.apply(:intersect, op_intersect, t_foo, t_bar)
test(":foo & :bar (raw) is not the false node", false, intersect_foo_bar_raw == false_id)
# --- Section: Simplify Algorithm (Flat Types) ---
IO.puts("\n--- Section: Algo.simplify (with Consistency.Engine) ---")
# An impossible structure: node that requires a value to be an atom AND an integer
# This tests the `check_primary_exclusivity` rule.
contradictory_assumptions = %{Variable.v_is_atom() => true, Variable.v_is_integer() => true}
# Simplifying ANYTHING under contradictory assumptions should yield `false`.
simplified_under_contradiction = Algo.simplify(true_id, contradictory_assumptions)
test(
"Simplifying under contradictory assumptions (atom & int) results in false",
false_id,
simplified_under_contradiction
)
# Test implication: A property implies its primary type
# A value being `:foo` implies it is an atom.
assumptions_with_foo = %{Variable.v_atom_eq(:foo) => true}
# If we simplify t_int under this assumption, it should become false.
# The engine expands to `{is_atom: true, value==:foo: true}`. Then it sees that
# the t_int node's variable `is_integer` must be false (from exclusivity rule).
simplified_int_given_foo = Algo.simplify(t_int, assumptions_with_foo)
test(
"Simplifying 'integer' given 'value==:foo' results in false",
false_id,
simplified_int_given_foo
)
    # Simplifying the raw intersection of :foo and :bar should discover the
    # contradiction that an atom cannot be both :foo and :bar at the same time
    # (this relies on `check_atom_consistency`). The result is kept only for
    # manual inspection; the assertion below exercises a simpler contradiction
    # that is already implemented.
    _simplified_foo_bar = Algo.simplify(intersect_foo_bar_raw, %{})
intersect_atom_int_raw = Algo.apply(:intersect, op_intersect, t_atom, t_int)
simplified_atom_int = Algo.simplify(intersect_atom_int_raw, %{})
test("Simplifying 'atom & int' results in false", false_id, simplified_atom_int)
# Test path collapsing
# If we simplify 'atom | int' under the assumption 'is_atom == true', it should become `true`.
simplified_sum_given_atom = Algo.simplify(sum_atom_int, %{Variable.v_is_atom() => true})
test(
"Simplifying 'atom | int' given 'is_atom==true' results in true",
true_id,
simplified_sum_given_atom
)
# If we simplify 'atom | int' under the assumption 'is_atom == false', it should become `t_int`.
simplified_sum_given_not_atom = Algo.simplify(sum_atom_int, %{Variable.v_is_atom() => false})
test(
"Simplifying 'atom | int' given 'is_atom==false' results in 'integer'",
t_int,
simplified_sum_given_not_atom
)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.Algo tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
# Optional: print details of failed tests if needed
end
end
end
defmodule TypeReconstructorTests do
alias Tdd.TypeReconstructor
alias Tdd.Variable
alias Tdd.TypeSpec
defp test(name, expected_spec, assumptions) do
# Normalize both expected and result for a canonical comparison
expected = TypeSpec.normalize(expected_spec)
result = TypeSpec.normalize(TypeReconstructor.spec_from_assumptions(assumptions))
is_ok = expected == result
status = if is_ok, do: "[PASS]", else: "[FAIL]"
IO.puts("#{status} #{name}")
unless is_ok do
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
def run() do
IO.puts("\n--- Running Tdd.TypeReconstructor Tests ---")
Process.put(:test_failures, [])
# --- Section: Basic Flat Reconstructions ---
IO.puts("\n--- Section: Basic Flat Reconstructions ---")
test("is_atom=true -> atom", :atom, %{Variable.v_is_atom() => true})
test("is_atom=false -> ¬atom", {:negation, :atom}, %{Variable.v_is_atom() => false})
test(
"is_atom=true AND value==:foo -> :foo",
{:literal, :foo},
%{Variable.v_is_atom() => true, Variable.v_atom_eq(:foo) => true}
)
test(
"is_atom=true AND value!=:foo -> atom & ¬:foo",
{:intersect, [:atom, {:negation, {:literal, :foo}}]},
%{Variable.v_is_atom() => true, Variable.v_atom_eq(:foo) => false}
)
test(
"is_integer=true AND int==5 -> 5",
{:literal, 5},
%{Variable.v_is_integer() => true, Variable.v_int_eq(5) => true}
)
test(
"is_list=true AND is_empty=true -> []",
{:literal, []},
%{Variable.v_is_list() => true, Variable.v_list_is_empty() => true}
)
# --- Section: Combined Flat Reconstructions ---
IO.puts("\n--- Section: Combined Flat Reconstructions ---")
test(
"int > 10 AND int < 20",
# This is complex. Our simple reconstructor makes two separate ranges.
# A more advanced one would combine them into a single {:integer_range, 11, 19}.
# For now, we test the current behavior.
{:intersect,
[
:integer,
{:integer_range, 11, :pos_inf},
{:integer_range, :neg_inf, 19}
]},
%{Variable.v_int_gt(10) => true, Variable.v_int_lt(20) => true}
)
# --- Section: Recursive Reconstructions (Simplified) ---
IO.puts("\n--- Section: Recursive Reconstructions ---")
# This tests the partitioning and recursive call logic.
# Our simple implementation of recursive cases means we can only test simple things.
test(
"head is an atom",
{:intersect, [:list, {:cons, :atom, :any}]},
# Assumption for `is_list=true` is implied by `v_list_head_pred`
%{Variable.v_list_head_pred(Variable.v_is_atom()) => true}
)
# Note: The recursive tests are limited by the simplifications made in the
# implementation (e.g., tuple reconstruction). A full implementation would
# require more context (like tuple size) to be passed down.
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All TypeReconstructor tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule CompilerAlgoTests do
alias Tdd.Compiler
alias Tdd.Store
alias Tdd.Algo
# High-level helpers that mimic the final API
# defp is_subtype(spec1, spec2) do
# id1 = Compiler.spec_to_id(spec1)
# id2 = Compiler.spec_to_id(spec2)
# # The subtyping check is: `A <: B` if and only if `A & ~B` is empty (`:none`).
# neg_id2 = Algo.negate(id2)
#
# op_intersect = fn
# :false_terminal, _ -> :false_terminal
# _, :false_terminal -> :false_terminal
# t, :true_terminal -> t
# :true_terminal, t -> t
# # Default case for non-terminal nodes, though apply handles recursion
# _t1, _t2 -> :non_terminal
# end
#
# intersect_id = Algo.apply(:intersect, op_intersect, id1, neg_id2)
# final_id = Algo.simplify(intersect_id)
# final_id == Store.false_node_id()
# end
defp are_equivalent(spec1, spec2) do
Compiler.spec_to_id(spec1) == Compiler.spec_to_id(spec2)
end
defp is_contradiction(spec) do
Compiler.spec_to_id(spec) == Store.false_node_id()
end
defp test_subtype(name, expected, s1, s2), do: test(name, expected, Compiler.is_subtype(s1, s2))
defp test_equiv(name, expected, s1, s2), do: test(name, expected, are_equivalent(s1, s2))
defp test_contradiction(name, expected \\ true), do: &test(name, expected, is_contradiction(&1))
defp test(name, exp, res) do
is_ok = exp == res
status = if is_ok, do: "[PASS]", else: "[FAIL]"
IO.puts("#{status} #{name}")
unless is_ok do
IO.puts(" Expected: #{inspect(exp)}")
IO.puts(" Got: #{inspect(res)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
def run() do
IO.puts("\n--- Running Compiler & Algo Integration Tests ---")
Process.put(:test_failures, [])
# Setup
Tdd.Store.init()
# --- Section: Basic Compilation & Equivalence ---
IO.puts("\n--- Section: Basic Equivalences ---")
test_equiv("atom & any == atom", true, {:intersect, [:atom, :any]}, :atom)
test_equiv("atom | none == atom", true, {:union, [:atom, :none]}, :atom)
test_equiv("atom & int == none", true, {:intersect, [:atom, :integer]}, :none)
test_equiv("¬(¬atom) == atom", true, {:negation, {:negation, :atom}}, :atom)
test_equiv("atom | atom == atom", true, {:union, [:atom, :atom]}, :atom)
# --- Section: Basic Subtyping ---
IO.puts("\n--- Section: Basic Subtyping ---")
Tdd.Debug.enable_tracing()
test_subtype(":foo <: atom", true, {:literal, :foo}, :atom)
Tdd.Debug.disable_tracing()
test_subtype("atom <: :foo", false, :atom, {:literal, :foo})
test_subtype(":foo <: integer", false, {:literal, :foo}, :integer)
test_subtype("int==5 <: integer", true, {:literal, 5}, :integer)
test_subtype("none <: atom", true, :none, :atom)
test_subtype("atom <: any", true, :atom, :any)
# --- Section: Integer Range Logic ---
IO.puts("\n--- Section: Integer Range Logic ---")
int_5_to_10 = {:integer_range, 5, 10}
int_7_to_8 = {:integer_range, 7, 8}
int_15_to_20 = {:integer_range, 15, 20}
int_0_to_100 = {:integer_range, 0, 100}
test_subtype("range(7..8) <: range(5..10)", true, int_7_to_8, int_5_to_10)
test_subtype("range(5..10) <: range(7..8)", false, int_5_to_10, int_7_to_8)
test_subtype("range(5..10) <: range(15..20)", false, int_5_to_10, int_15_to_20)
test_equiv(
"range(5..10) & range(7..8) == range(7..8)",
true,
{:intersect, [int_5_to_10, int_7_to_8]},
int_7_to_8
)
test_equiv(
"range(5..10) & range(0..100) == range(5..10)",
true,
{:intersect, [int_5_to_10, int_0_to_100]},
int_5_to_10
)
test_equiv(
"range(5..10) | range(7..8) == range(5..10)",
true,
{:union, [int_5_to_10, int_7_to_8]},
int_5_to_10
)
# --- Section: Contradictions & Simplifications ---
IO.puts("\n--- Section: Contradictions & Simplifications ---")
test_contradiction("atom & integer").({:intersect, [:atom, :integer]})
test_contradiction(":foo & :bar").({:intersect, [{:literal, :foo}, {:literal, :bar}]})
test_contradiction("atom & (int==5)").({:intersect, [:atom, {:literal, 5}]})
test_contradiction("range(5..10) & range(15..20)").({:intersect, [int_5_to_10, int_15_to_20]})
test_contradiction("integer & ¬integer").({:intersect, [:integer, {:negation, :integer}]})
# --- Section: Subtype Reduction in Normalization ---
IO.puts("\n--- Section: Subtype Reduction Logic ---")
test_equiv(
"(:foo | :bar | atom) simplifies to atom",
true,
{:union, [{:literal, :foo}, {:literal, :bar}, :atom]},
:atom
)
test_equiv(
"(range(5..10) | integer) simplifies to integer",
true,
{:union, [int_5_to_10, :integer]},
:integer
)
test_equiv(
"(:foo & atom) simplifies to :foo",
true,
{:intersect, [{:literal, :foo}, :atom]},
{:literal, :foo}
)
test_equiv(
"(range(5..10) & integer) simplifies to range(5..10)",
true,
{:intersect, [int_5_to_10, :integer]},
int_5_to_10
)
# --- Section: Logical Laws (Distribution and De Morgan's) ---
IO.puts("\n--- Section: Logical Laws ---")
# De Morgan's Law: ¬(A | B) == (¬A & ¬B)
spec_not_a_or_b = {:negation, {:union, [:atom, :integer]}}
spec_not_a_and_not_b = {:intersect, [{:negation, :atom}, {:negation, :integer}]}
test_equiv(
"De Morgan's (¬(A|B) == ¬A & ¬B) holds",
true,
spec_not_a_or_b,
spec_not_a_and_not_b
)
# De Morgan's Law: ¬(A & B) == (¬A | ¬B)
spec_not_a_and_b = {:negation, {:intersect, [{:literal, :foo}, int_5_to_10]}}
spec_not_a_or_not_b = {:union, [{:negation, {:literal, :foo}}, {:negation, int_5_to_10}]}
test_equiv(
"De Morgan's (¬(A&B) == ¬A | ¬B) holds",
true,
spec_not_a_and_b,
spec_not_a_or_not_b
)
# Distributive Law: A & (B | C) == (A & B) | (A & C)
spec_a = :integer
spec_b = {:integer_range, 0, 10}
spec_c = {:integer_range, 20, 30}
spec_dist_lhs = {:intersect, [spec_a, {:union, [spec_b, spec_c]}]}
spec_dist_rhs = {:union, [{:intersect, [spec_a, spec_b]}, {:intersect, [spec_a, spec_c]}]}
test_equiv(
"Distributive Law (A & (B|C)) holds",
true,
spec_dist_lhs,
spec_dist_rhs
)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Compiler & Algo Integration tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule TddCompilerRecursiveTests do
alias Tdd.Compiler
alias Tdd.Store
alias Tdd.Algo
alias Tdd.TypeSpec
# --- Test Runner ---
def run() do
IO.puts("\n--- Running Tdd.Compiler Recursive Type Tests ---")
Process.put(:test_failures, [])
Tdd.Store.init()
# --- Tests for :cons ---
IO.puts("\n--- Section: :cons ---")
test_subtype(":cons is a subtype of :list", true, {:cons, :atom, :list}, :list)
test_subtype(
":cons is not a subtype of the empty list",
false,
{:cons, :atom, :list},
{:literal, []}
)
test_subtype(
"cons(integer, list) is a subtype of cons(any, any)",
true,
{:cons, :integer, :list},
{:cons, :any, :any}
)
test_subtype(
"cons(any, any) is not a subtype of cons(integer, list)",
false,
{:cons, :any, :any},
{:cons, :integer, :list}
)
# --- Tests for :tuple ---
IO.puts("\n--- Section: :tuple ---")
test_subtype(
"{:tuple, [atom, int]} is a subtype of :tuple",
true,
{:tuple, [:atom, :integer]},
:tuple
)
test_subtype(
"{:tuple, [atom, int]} is not a subtype of :list",
false,
{:tuple, [:atom, :integer]},
:list
)
test_subtype(
"a tuple of size 2 is not a subtype of a tuple of size 3",
false,
{:tuple, [:atom, :integer]},
{:tuple, [:atom, :integer, :list]}
)
spec_specific = {:tuple, [{:literal, :a}, {:literal, 1}]}
spec_general = {:tuple, [:atom, :integer]}
spec_unrelated = {:tuple, [:list, :list]}
test_subtype(
"subtype check works element-wise (specific <: general)",
true,
spec_specific,
spec_general
)
test_subtype(
"subtype check works element-wise (general </: specific)",
false,
spec_general,
spec_specific
)
test_subtype(
"subtype check works element-wise (specific </: unrelated)",
false,
spec_specific,
spec_unrelated
)
# --- Tests for :list_of ---
IO.puts("\n--- Section: :list_of ---")
test_subtype("list_of(E) is a subtype of list", true, {:list_of, :integer}, :list)
test_subtype(
"empty list is a subtype of any list_of(E)",
true,
{:literal, []},
{:list_of, :integer}
)
test_subtype(
"list_of(subtype) is a subtype of list_of(supertype)",
true,
{:list_of, {:literal, 1}},
{:list_of, :integer}
)
test_subtype(
"list_of(supertype) is not a subtype of list_of(subtype)",
false,
{:list_of, :integer},
{:list_of, {:literal, 1}}
)
list_with_int = {:cons, :integer, {:literal, []}}
list_of_atoms = {:list_of, :atom}
test_subtype(
"a list with a wrong element type is not a subtype of list_of(E)",
false,
list_with_int,
list_of_atoms
)
list_with_atom = {:cons, :atom, {:literal, []}}
Tdd.Debug.run(fn ->
test_subtype(
"a list with a correct element type is a subtype of list_of(E)",
true,
list_with_atom,
list_of_atoms
)
end)
# --- Equivalence tests ---
IO.puts("\n--- Section: Equivalence ---")
e_spec = :integer
list_of_e = {:list_of, e_spec}
recursive_def = TypeSpec.normalize({:union, [{:literal, []}, {:cons, e_spec, list_of_e}]})
test_equiv(
"the recursive definition holds: list_of(E) == [] | cons(E, list_of(E))",
true,
list_of_e,
recursive_def
)
int_range_1 = {:integer_range, 0, 10}
int_range_2 = {:integer_range, 5, 15}
int_range_intersect = {:integer_range, 5, 10}
list_of_1 = {:list_of, int_range_1}
list_of_2 = {:list_of, int_range_2}
list_of_intersect = {:list_of, int_range_intersect}
combined_spec = {:intersect, [list_of_1, list_of_2]}
test_equiv(
"intersection of two list_of types works correctly",
true,
combined_spec,
list_of_intersect
)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.Compiler Recursive Type tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
# --- Private Test Helpers ---
defp test(name, expected, result) do
if expected == result do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
defp test_equiv(name, expected, spec1, spec2) do
result = Compiler.spec_to_id(spec1) == Compiler.spec_to_id(spec2)
test(name, expected, result)
end
defp test_subtype(name, expected, spec1, spec2) do
result = Compiler.is_subtype(spec1, spec2)
test(name, expected, result)
end
# --- Private Logic Helpers ---
defp do_is_subtype(spec1, spec2) do
id1 = Compiler.spec_to_id(spec1)
id2 = Compiler.spec_to_id(spec2)
# A <: B <=> A & ~B == none
intersect_id = Algo.apply(:intersect, &op_intersect_terminals/2, id1, Algo.negate(id2))
final_id = Algo.simplify(intersect_id)
final_id == Store.false_node_id()
end
defp op_intersect_terminals(u1_details, u2_details) do
case {u1_details, u2_details} do
{:false_terminal, _} -> :false_terminal
{_, :false_terminal} -> :false_terminal
{:true_terminal, t2} -> t2
{t1, :true_terminal} -> t1
end
end
end
# Give the VM a brief moment to settle before the test runners start.
Process.sleep(100)
TypeSpecTests.run()
TddStoreTests.run()
TddVariableTests.run()
TddAlgoTests.run()
ConsistencyEngineTests.run()
TypeReconstructorTests.run()
CompilerAlgoTests.run()
TddCompilerRecursiveTests.run()