# elipl/new.exs
defmodule Tdd.TypeSpec do
@moduledoc """
Defines the `TypeSpec` structure and functions for its manipulation.
A `TypeSpec` is a stable, structural, and declarative representation of a type.
It serves as the primary language for defining and interacting with types in the
higher-level system, abstracting away the underlying TDD implementation.
All `TypeSpec`s should be passed through `normalize/1` before being used in
caching or compilation to ensure a canonical representation.
"""
@typedoc "A stable, structural representation of a type."
# --- Core Types ---
@type t ::
:any
| :none
| :atom
| :integer
| :list
| :tuple
# (Add :binary, :function, :pid, etc. here as they are implemented)
# --- Literal Value Type ---
| {:literal, term()}
# --- Set-Theoretic Combinators ---
# Note: The `normalize/1` function guarantees that the lists in
# :union and :intersect are sorted, unique, and flattened.
| {:union, [t()]}
| {:intersect, [t()]}
| {:negation, t()}
# --- Parameterized Structural Types ---
| {:tuple, [t()]}
| {:cons, head :: t(), tail :: t()}
| {:list_of, element :: t()}
# --- Integer Range (Example of property-based type) ---
| {:integer_range, min :: integer() | :neg_inf, max :: integer() | :pos_inf}
# --- For Polymorphism (Future Use) ---
| {:type_var, atom()}
@doc """
Converts a `TypeSpec` into its canonical (normalized) form.
Normalization is crucial for reliable caching and simplifying downstream logic.
It performs several key operations:
1. **Flattens nested unions and intersections:**
`{:union, [A, {:union, [B, C]}]}` becomes `{:union, [A, B, C]}`.
2. **Sorts and uniqs members of unions and intersections:**
`{:union, [C, A, A]}` becomes `{:union, [A, C]}`.
3. **Applies logical simplification rules (idempotency, annihilation):**
- `A | A` -> `A`
- `A | none` -> `A`
- `A & any` -> `A`
- `A & none` -> `none`
- `A | any` -> `any`
- `¬(¬A)` -> `A`
- An intersection containing both `A` and `¬A` simplifies to `none`. (This is
better handled by the TDD compiler, but basic checks can happen here.)
4. **Recursively normalizes all sub-specs.**
"""
@spec normalize(t()) :: t()
def normalize(spec) do
case spec do
# Base cases are unchanged
s when is_atom(s) -> s
{:literal, _} -> spec
{:type_var, _} -> spec
# Recursive cases now call helper functions
{:negation, sub_spec} -> normalize_negation(sub_spec)
{:tuple, elements} -> {:tuple, Enum.map(elements, &normalize/1)}
{:cons, head, tail} -> {:cons, normalize(head), normalize(tail)}
{:list_of, element} -> {:list_of, normalize(element)}
# A new rule for integer ranges
{:integer_range, min, max} -> normalize_integer_range(min, max)
{:union, members} -> normalize_union(members)
{:intersect, members} -> normalize_intersection(members)
end
end
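# Illustrative results of `normalize/1` (nothing here beyond the rules above):
#
#   iex> Tdd.TypeSpec.normalize({:union, [:integer, {:union, [:list, :atom]}, :none]})
#   {:union, [:atom, :integer, :list]}
#
#   iex> Tdd.TypeSpec.normalize({:intersect, [:any, {:negation, {:negation, :atom}}]})
#   :atom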
# ------------------------------------------------------------------
# Private Normalization Helpers
# ------------------------------------------------------------------
defp normalize_negation(sub_spec) do
normalized_sub = normalize(sub_spec)
case normalized_sub do
# ¬(¬A) -> A
{:negation, inner_spec} -> inner_spec
:any -> :none
:none -> :any
_ -> {:negation, normalized_sub}
end
end
defp normalize_integer_range(min, max) do
# An invalid range simplifies to `none`.
if is_integer(min) and is_integer(max) and min > max do
:none
else
# An intersection with `:integer` is implied; adding it explicitly (e.g. as
# {:intersect, [:integer, {:integer_range, min, max}]}) is left as a TODO,
# so the range is returned as-is for now.
{:integer_range, min, max}
end
end
defp normalize_union(members) do
# 1. Recursively normalize and flatten members
normalized_and_flattened =
Enum.flat_map(members, fn member ->
normalized = normalize(member)
case normalized do
{:union, sub_members} -> sub_members
_ -> [normalized]
end
end)
# 2. Apply simplification rules
simplified_members =
normalized_and_flattened
# A | none -> A
|> Enum.reject(&(&1 == :none))
|> MapSet.new()
if MapSet.member?(simplified_members, :any) do
# A | any -> any
:any
else
# 3. NEW: Reduce by removing subtypes
# If we have {A, B} and A <: B, the union is just B. So we keep only supersets.
# We achieve this by removing any member that is a subtype of another member.
reduced_members =
Enum.reject(simplified_members, fn member_to_check ->
Enum.any?(simplified_members, fn other_member ->
member_to_check != other_member and is_subtype?(member_to_check, other_member)
end)
end)
# 4. Finalize the structure
case reduced_members do
[] -> :none
[single_member] -> single_member
list -> {:union, Enum.sort(list)}
end
end
end
defp normalize_intersection(members) do
# IO.inspect("Normalize intersection")
# 1. Recursively normalize and flatten, but also add implied supertypes
normalized_and_flattened =
Enum.flat_map(members, fn member ->
# IO.inspect(member, label: "normalize member")
normalized = normalize(member)
# Expand a type into itself and its implied supertypes
# e.g., `:foo` becomes `[:foo, :atom]`
expanded =
case normalized do
{:intersect, sub_members} -> sub_members
_ -> get_supertypes(normalized)
end
expanded
end)
# 2. Apply simplification rules
simplified_members =
normalized_and_flattened
# A & any -> A
|> Enum.reject(&(&1 == :any))
|> MapSet.new()
if MapSet.member?(simplified_members, :none) do
# A & none -> none
:none
else
# 3. NEW: Reduce by removing supertypes
# IO.inspect("Reduce by removing supertypes")
# If we have {A, B} and A <: B, the intersection is just A. So we keep only subsets.
# We achieve this by removing any member for which a proper subtype exists in the set.
reduced_members =
Enum.reject(simplified_members, fn member_to_check ->
Enum.any?(simplified_members, fn other_member ->
member_to_check != other_member and is_subtype?(other_member, member_to_check)
end)
end)
# 4. Finalize the structure
# IO.inspect("4. Finalize the structure")
case reduced_members do
[] -> :any
[single_member] -> single_member
list -> {:intersect, Enum.sort(list)}
end
end
end
# ------------------------------------------------------------------
# Semantic Helpers (subtype checks used by normalization)
# ------------------------------------------------------------------
@doc """
A preliminary, non-TDD check if `spec1` is a subtype of `spec2`.
This check is not exhaustive but covers many common, structural cases,
allowing for significant simplification at the `TypeSpec` level.
"""
@spec is_subtype?(t(), t()) :: boolean
def is_subtype?(spec1, spec2) do
# Avoid infinite recursion by not re-normalizing.
# The callers are assumed to be working with normalized data.
# Base cases are handled first for efficiency.
cond do
spec1 == spec2 -> true
spec1 == :none -> true
spec2 == :any -> true
spec1 == :any or spec2 == :none -> false
# Defer to pattern-matching helper
true -> do_is_subtype?(spec1, spec2)
end
end
# Private helper that uses `case` for proper pattern matching.
defp do_is_subtype?(spec1, spec2) do
case {spec1, spec2} do
# --- Set-theoretic rules ---
# (A | B) <: C if A <: C and B <: C
{{:union, members1}, _} ->
Enum.all?(members1, &is_subtype?(&1, spec2))
# A <: (B | C) if A <: B or A <: C
{_, {:union, members2}} ->
Enum.any?(members2, &is_subtype?(spec1, &1))
# (A & B) <: C if A <: C or B <: C
{{:intersect, members1}, _} ->
Enum.any?(members1, &is_subtype?(&1, spec2))
# A <: (B & C) if A <: B and A <: C
{_, {:intersect, members2}} ->
Enum.all?(members2, &is_subtype?(spec1, &1))
# --- Literal and Base Type Rules ---
{s1, s2} when is_atom(s1) and is_atom(s2) ->
s1 == s2
{{:literal, v1}, {:literal, v2}} ->
v1 == v2
{{:literal, val}, :atom} when is_atom(val) ->
true
{{:literal, val}, :integer} when is_integer(val) ->
true
{{:literal, val}, :list} when is_list(val) ->
true
{{:literal, val}, :tuple} when is_tuple(val) ->
true
# --- List and Cons Rules ---
{{:literal, []}, :list} ->
true
{{:cons, _, _}, :list} ->
true
{{:list_of, _}, :list} ->
true
{{:cons, h1, t1}, {:cons, h2, t2}} ->
is_subtype?(h1, h2) and is_subtype?(t1, t2)
{{:list_of, e1}, {:list_of, e2}} ->
is_subtype?(e1, e2)
{{:cons, h, t}, {:list_of, x}} ->
is_subtype?(h, x) and is_subtype?(t, {:list_of, x})
{{:literal, []}, {:list_of, _}} ->
true
# --- Tuple Rules ---
{{:literal, {}}, :tuple} ->
true
{{:tuple, _}, :tuple} ->
true
{{:tuple, elems1}, {:tuple, elems2}} when length(elems1) == length(elems2) ->
Enum.zip_with(elems1, elems2, &is_subtype?/2) |> Enum.all?()
# --- Integer Range Rules ---
{{:integer_range, _, _}, :integer} ->
true
{{:integer_range, min1, max1}, {:integer_range, min2, max2}} ->
# range1 ⊆ range2 iff min1 >= min2 and max1 <= max2, treating the infinities as unbounded ends.
min1_gte_min2 = min2 == :neg_inf or (min1 != :neg_inf and min1 >= min2)
max1_lte_max2 = max2 == :pos_inf or (max1 != :pos_inf and max1 <= max2)
min1_gte_min2 and max1_lte_max2
{{:literal, val}, {:integer_range, min, max}} when is_integer(val) ->
(min == :neg_inf or val >= min) and (max == :pos_inf or val <= max)
# --- Default fallback ---
# If no specific rule matches, they are not considered subtypes.
{_, _} ->
false
end
end
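# A few illustrative checks (inputs are already in canonical form, as the
# callers of `is_subtype?/2` are expected to guarantee):
#
#   iex> Tdd.TypeSpec.is_subtype?({:literal, :ok}, :atom)
#   true
#   iex> Tdd.TypeSpec.is_subtype?({:integer_range, 1, 5}, {:integer_range, 0, 10})
#   true
#   iex> Tdd.TypeSpec.is_subtype?(:atom, {:literal, :ok})
#   false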
# Gets a list of immediate, known supertypes for a given simple spec.
defp get_supertypes(spec) do
supertypes =
case spec do
{:literal, val} when is_atom(val) -> [:atom]
{:literal, val} when is_integer(val) -> [:integer]
{:literal, val} when is_list(val) -> [:list]
{:literal, val} when is_tuple(val) -> [:tuple]
{:cons, _, _} -> [:list]
{:list_of, _} -> [:list]
{:tuple, _} -> [:tuple]
{:integer_range, _, _} -> [:integer]
_ -> []
end
# Use a MapSet to ensure the spec and its supertypes are unique.
MapSet.to_list(MapSet.new([spec | supertypes]))
end
end
defmodule Tdd.Store do
@moduledoc """
Manages the state of the TDD system's node graph and operation cache.
This module acts as the stateful backend for the TDD algorithms. It is
responsible for creating unique, shared nodes (ensuring structural canonicity)
and for memoizing the results of expensive operations.
It is intentionally agnostic about the *meaning* of the variables within the
nodes; it treats them as opaque, comparable terms. The logic for interpreting
these variables resides in higher-level modules like `Tdd.Algo` and
`Tdd.Consistency.Engine`.
For simplicity, this implementation uses the Process dictionary for state.
In a production, concurrent application, this would be replaced by a `GenServer`
to ensure safe, serialized access to the shared TDD state.
"""
# --- State Keys ---
@nodes_key :tdd_nodes
@node_by_id_key :tdd_node_by_id
@next_id_key :tdd_next_id
@op_cache_key :tdd_op_cache
# --- Terminal Node IDs ---
@false_node_id 0
@true_node_id 1
# --- Public API ---
@doc "Initializes the TDD store in the current process."
def init do
# The main lookup table: {variable, y, n, d} -> id
Process.put(@nodes_key, %{})
# The reverse lookup table: id -> {variable, y, n, d} or :terminal
Process.put(@node_by_id_key, %{
@false_node_id => :false_terminal,
@true_node_id => :true_terminal
})
# The next available integer ID for a new node.
Process.put(@next_id_key, 2)
# The cache for memoizing operation results: {op, args} -> id
Process.put(@op_cache_key, %{})
:ok
end
@doc "Returns the ID for the TRUE terminal node (the 'any' type)."
@spec true_node_id() :: non_neg_integer()
def true_node_id, do: @true_node_id
@doc "Returns the ID for the FALSE terminal node (the 'none' type)."
@spec false_node_id() :: non_neg_integer()
def false_node_id, do: @false_node_id
@doc "Retrieves the details of a node by its ID."
@spec get_node(non_neg_integer()) ::
{:ok,
{variable :: term(), yes_id :: non_neg_integer(), no_id :: non_neg_integer(),
dc_id :: non_neg_integer()}}
| {:ok, :true_terminal | :false_terminal}
| {:error, :not_found}
def get_node(id) do
case Process.get(@node_by_id_key, %{}) do
%{^id => details} -> {:ok, details}
%{} -> {:error, :not_found}
end
end
@doc """
Finds an existing node that matches the structure or creates a new one.
This is the core function for ensuring structural sharing (part of the "Reduced"
property of ROBDDs). It also implements a fundamental reduction rule: if all
children of a node are identical, the node is redundant and is replaced by
its child.
"""
@spec find_or_create_node(
variable :: term(),
yes_id :: non_neg_integer(),
no_id :: non_neg_integer(),
dc_id :: non_neg_integer()
) :: non_neg_integer()
def find_or_create_node(variable, yes_id, no_id, dc_id) do
# Basic reduction rule: a node whose test is irrelevant is redundant.
if yes_id == no_id && yes_id == dc_id do
yes_id
else
node_tuple = {variable, yes_id, no_id, dc_id}
nodes = Process.get(@nodes_key, %{})
case Map.get(nodes, node_tuple) do
# Node already exists, return its ID for structural sharing.
id when is_integer(id) ->
id
# Node does not exist, create it.
nil ->
next_id = Process.get(@next_id_key)
node_by_id = Process.get(@node_by_id_key)
# Update all state tables
Process.put(@nodes_key, Map.put(nodes, node_tuple, next_id))
Process.put(@node_by_id_key, Map.put(node_by_id, next_id, node_tuple))
Process.put(@next_id_key, next_id + 1)
next_id
end
end
end
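# Sketch of the sharing and reduction behaviour, using throwaway atoms :v and :w
# as opaque variables (the IDs shown are what a fresh store would assign):
#
#   Tdd.Store.init()
#   Tdd.Store.find_or_create_node(:v, 1, 0, 0) # => 2 (new node)
#   Tdd.Store.find_or_create_node(:v, 1, 0, 0) # => 2 (same ID, structurally shared)
#   Tdd.Store.find_or_create_node(:w, 2, 2, 2) # => 2 (all children equal, reduced away)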
@doc "Retrieves a result from the operation cache."
@spec get_op_cache(term()) :: {:ok, term()} | :not_found
def get_op_cache(cache_key) do
case Process.get(@op_cache_key, %{}) do
%{^cache_key => result} -> {:ok, result}
%{} -> :not_found
end
end
@doc "Puts a result into the operation cache."
@spec put_op_cache(term(), term()) :: :ok
def put_op_cache(cache_key, result) do
# A GenServer would be needed for safe concurrent access; this process-dictionary
# read-modify-write is fine for the single-process context assumed here.
cache = Process.get(@op_cache_key, %{})
Process.put(@op_cache_key, Map.put(cache, cache_key, result))
:ok
end
end
defmodule Tdd.Variable do
@moduledoc """
Defines the canonical structure for all Tdd predicate variables.
The structure `{category, predicate, value, padding}` is used to enforce a
stable global ordering. All variables are 4-element tuples to ensure that
Elixir's tuple-size-first comparison rule does not interfere with the
intended predicate ordering within a category.
"""
alias Tdd.TypeSpec
# --- Category 0: Primary Type Discriminators ---
# Padding with `nil` to make them 4-element tuples.
@spec v_is_atom() :: term()
def v_is_atom, do: {0, :is_atom, nil, nil}
@spec v_is_integer() :: term()
def v_is_integer, do: {0, :is_integer, nil, nil}
@spec v_is_list() :: term()
def v_is_list, do: {0, :is_list, nil, nil}
@spec v_is_tuple() :: term()
def v_is_tuple, do: {0, :is_tuple, nil, nil}
# --- Category 1: Atom Properties ---
@spec v_atom_eq(atom()) :: term()
def v_atom_eq(atom_val) when is_atom(atom_val), do: {1, :value, atom_val, nil}
# --- Category 2: Integer Properties ---
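# The a/b/c prefixes in :alt, :beq and :cgt keep the intra-category ordering
# lt < eq < gt, which the ordering tests at the bottom of this file rely on.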
@spec v_int_lt(integer()) :: term()
def v_int_lt(n) when is_integer(n), do: {2, :alt, n, nil}
@spec v_int_eq(integer()) :: term()
def v_int_eq(n) when is_integer(n), do: {2, :beq, n, nil}
@spec v_int_gt(integer()) :: term()
def v_int_gt(n) when is_integer(n), do: {2, :cgt, n, nil}
# --- Category 4: Tuple Properties ---
# The most complex var here is `:b_element` with index and nested var.
# So all vars in this category need to be at least 4-element.
@spec v_tuple_size_eq(non_neg_integer()) :: term()
def v_tuple_size_eq(size) when is_integer(size) and size >= 0, do: {4, :a_size, size, nil}
@spec v_tuple_elem_pred(non_neg_integer(), term()) :: term()
def v_tuple_elem_pred(index, nested_pred_var) when is_integer(index) and index >= 0 do
{4, :b_element, index, nested_pred_var}
end
# --- Category 5: List Properties ---
# All are now 4-element tuples. The sorting will be correct.
@spec v_list_all_elements_are(TypeSpec.t()) :: term()
def v_list_all_elements_are(element_spec), do: {5, :a_all_elements, element_spec, nil}
@spec v_list_is_empty() :: term()
def v_list_is_empty, do: {5, :b_is_empty, nil, nil}
@spec v_list_head_pred(term()) :: term()
def v_list_head_pred(nested_pred_var), do: {5, :c_head, nested_pred_var, nil}
@spec v_list_tail_pred(term()) :: term()
def v_list_tail_pred(nested_pred_var), do: {5, :d_tail, nested_pred_var, nil}
end
defmodule Tdd.Predicate.Info do
@moduledoc "A knowledge base for the properties of TDD predicate variables."
alias Tdd.Variable
@doc "Returns a map of traits for a given predicate variable."
@spec get_traits(term()) :: map() | nil
def get_traits({0, :is_atom, _, _}), do: %{type: :primary, category: :atom}
def get_traits({0, :is_integer, _, _}), do: %{type: :primary, category: :integer}
def get_traits({0, :is_list, _, _}), do: %{type: :primary, category: :list}
def get_traits({0, :is_tuple, _, _}), do: %{type: :primary, category: :tuple}
def get_traits({1, :value, _val, _}) do
%{type: :atom_value, category: :atom, implies: [{Variable.v_is_atom(), true}]}
end
def get_traits({2, :alt, _, _}),
do: %{type: :integer_prop, category: :integer, implies: [{Variable.v_is_integer(), true}]}
def get_traits({2, :beq, _, _}),
do: %{type: :integer_prop, category: :integer, implies: [{Variable.v_is_integer(), true}]}
def get_traits({2, :cgt, _, _}),
do: %{type: :integer_prop, category: :integer, implies: [{Variable.v_is_integer(), true}]}
def get_traits({4, :a_size, _, _}) do
%{type: :tuple_prop, category: :tuple, implies: [{Variable.v_is_tuple(), true}]}
end
def get_traits({4, :b_element, index, _nested_var}) do
%{
type: :tuple_recursive,
category: :tuple,
sub_key: {:elem, index},
implies: [{Variable.v_is_tuple(), true}]
}
end
def get_traits({5, :a_all_elements, element_spec, _}) do
%{
type: :list_recursive_ambient,
category: :list,
ambient_constraint_spec: element_spec,
implies: [{Variable.v_is_list(), true}]
}
end
def get_traits({5, :b_is_empty, _, _}) do
%{type: :list_prop, category: :list, implies: [{Variable.v_is_list(), true}]}
end
def get_traits({5, :c_head, _nested_var, _}) do
%{
type: :list_recursive,
category: :list,
sub_key: :head,
implies: [{Variable.v_is_list(), true}, {Variable.v_list_is_empty(), false}]
}
end
def get_traits({5, :d_tail, _nested_var, _}) do
%{
type: :list_recursive,
category: :list,
sub_key: :tail,
implies: [{Variable.v_is_list(), true}, {Variable.v_list_is_empty(), false}]
}
end
# Default case for unknown variables
def get_traits(_), do: nil
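# Example: `get_traits(Tdd.Variable.v_atom_eq(:foo))` returns
# `%{type: :atom_value, category: :atom, implies: [{{0, :is_atom, nil, nil}, true}]}`,
# i.e. asserting `value == :foo` implies `is_atom == true`.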
end
# in a new file, e.g., lib/tdd/consistency/engine.ex
defmodule Tdd.Consistency.Engine do
@moduledoc """
A rule-based engine for checking the semantic consistency of a set of assumptions.
This engine is the "oracle" for the `Tdd.Algo.simplify/2` function. It takes
a set of assumptions about predicate variables (e.g., `{is_atom, true}`,
`{value == :foo, true}`) and determines if that set is logically consistent.
The process involves two main steps:
1. **Expansion**: The initial assumptions are expanded with all their logical
implications until a fixed point is reached. For example, `{value == :foo, true}`
implies `{is_atom, true}`.
2. **Flat Check**: The fully expanded set of assumptions is checked against a
series of rules for contradictions (e.g., a value cannot be both an atom
and an integer).
The difficult problem of recursive consistency (checking sub-problems like a
list's head against an ambient constraint) is currently disabled, as it creates
a logical cycle with the TDD compiler.
"""
alias Tdd.Predicate.Info
alias Tdd.TypeReconstructor
alias Tdd.Compiler
alias Tdd.Variable
@doc """
Checks if a map of assumptions is logically consistent.
Returns `:consistent` or `:contradiction`.
"""
@spec check(map()) :: :consistent | :contradiction
def check(assumptions) do
with {:ok, expanded} <- expand_with_implications(assumptions),
:ok <- check_flat_consistency(expanded) do
# --- RECURSIVE CHECK DISABLED ---
# The call to `check_recursive_consistency/2` is disabled because it
# introduces a logical cycle:
# simplify -> check -> spec_to_id -> simplify
# Solving this requires a more advanced architecture, such as passing a
# compiler context/handle to break the recursion. For now, we rely
# on the power of the main `simplify` algorithm to handle these recursive
# constraints by construction, and we focus on robust flat checks.
# :ok <- check_recursive_consistency(expanded, assumptions)
:consistent
else
:error -> :contradiction
end
end
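# Illustrative checks, mirroring the two steps described in the moduledoc:
#
#   iex> Tdd.Consistency.Engine.check(%{Tdd.Variable.v_atom_eq(:foo) => true})
#   :consistent
#
#   iex> # `value == :foo` implies `is_atom == true`, clashing with the explicit `false`:
#   iex> Tdd.Consistency.Engine.check(%{
#   ...>   Tdd.Variable.v_atom_eq(:foo) => true,
#   ...>   Tdd.Variable.v_is_atom() => false
#   ...> })
#   :contradiction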
# --- Step 1: Implication Expansion ---
@doc "Expands an assumption map with all its logical implications."
defp expand_with_implications(assumptions) do
# Start with the initial set of assumptions and expand until a fixed point is reached.
expand_loop(assumptions, assumptions)
end
defp expand_loop(new_assumptions, all_assumptions) do
# For each of the newly added assumptions, find its implications.
implications =
Enum.flat_map(new_assumptions, fn
# An assumption `var == true` may have implications.
{var, true} -> Map.get(Info.get_traits(var) || %{}, :implies, [])
# An assumption `var == false` has no simple implications in our current model.
_ -> []
end)
# Attempt to merge the new implications into the set of all assumptions.
case Enum.reduce(implications, {:ok, %{}}, fn {implied_var, implied_val}, acc ->
reduce_implication({implied_var, implied_val}, all_assumptions, acc)
end) do
{:error, :contradiction} ->
# A direct contradiction was found during expansion.
:error
{:ok, newly_added} when map_size(newly_added) == 0 ->
# Fixed point reached: no new, non-conflicting implications were found.
{:ok, all_assumptions}
{:ok, newly_added} ->
# Recursively expand with the newly found assumptions.
expand_loop(newly_added, Map.merge(all_assumptions, newly_added))
end
end
# Helper for the implication reducer.
defp reduce_implication({var, val}, all_assumptions, {:ok, new_acc}) do
case Map.get(all_assumptions, var) do
nil ->
# This is a new piece of information. Add it to the set of newly_added things.
{:ok, Map.put(new_acc, var, val)}
^val ->
# We already knew this. Continue without adding.
{:ok, new_acc}
_other_val ->
# Contradiction! The implication conflicts with an existing assumption.
{:error, :contradiction}
end
end
defp reduce_implication(_implication, _all_assumptions, error_acc), do: error_acc
# --- Step 2: Flat Consistency Checks ---
defp check_flat_consistency(assumptions) do
with :ok <- check_primary_type_exclusivity(assumptions),
:ok <- check_atom_consistency(assumptions),
:ok <- check_list_consistency(assumptions),
:ok <- check_integer_consistency(assumptions) do
:ok
else
:error -> :error
end
end
defp check_primary_type_exclusivity(assumptions) do
primary_types = [
Variable.v_is_atom(),
Variable.v_is_integer(),
Variable.v_is_list(),
Variable.v_is_tuple()
]
true_primary_types = Enum.count(primary_types, &(Map.get(assumptions, &1) == true))
if true_primary_types > 1, do: :error, else: :ok
end
defp check_atom_consistency(assumptions) do
true_atom_values =
Enum.reduce(assumptions, MapSet.new(), fn
{{1, :value, atom_val, _}, true}, acc -> MapSet.put(acc, atom_val)
_, acc -> acc
end)
if MapSet.size(true_atom_values) > 1, do: :error, else: :ok
end
defp check_list_consistency(assumptions) do
# This check is actually redundant if `expand_with_implications` works correctly,
# as the `implies` rules for head/tail would create an explicit contradiction
# with `is_empty == true`. However, it serves as a good safeguard.
is_empty = Map.get(assumptions, Variable.v_list_is_empty()) == true
has_head_prop = Enum.any?(assumptions, &match?({{5, :c_head, _, _}, true}, &1))
has_tail_prop = Enum.any?(assumptions, &match?({{5, :d_tail, _, _}, true}, &1))
if is_empty and (has_head_prop or has_tail_prop), do: :error, else: :ok
end
defp check_integer_consistency(assumptions) do
initial_range = {:neg_inf, :pos_inf}
result =
Enum.reduce_while(assumptions, initial_range, fn assumption, {min, max} ->
case assumption do
{{2, :alt, n, _}, true} -> narrow_range(min, safe_min(max, n - 1))
{{2, :alt, n, _}, false} -> narrow_range(safe_max(min, n), max)
{{2, :beq, n, _}, true} -> narrow_range(safe_max(min, n), safe_min(max, n))
{{2, :beq, n, _}, false} when min == n and max == n -> {:halt, :invalid}
{{2, :cgt, n, _}, true} -> narrow_range(safe_max(min, n + 1), max)
{{2, :cgt, n, _}, false} -> narrow_range(min, safe_min(max, n))
_ -> {:cont, {min, max}}
end
end)
case result do
:invalid -> :error
_ -> :ok
end
end
# **IMPROVED**: A clearer implementation for checking range validity.
defp narrow_range(min, max) do
is_invalid =
case {min, max} do
{:neg_inf, _} -> false
{_, :pos_inf} -> false
{m, n} when is_integer(m) and is_integer(n) -> m > n
# Should not happen with safe helpers
_ -> false
end
if is_invalid, do: {:halt, :invalid}, else: {:cont, {min, max}}
end
# **NEW**: Safe comparison helpers that understand :neg_inf and :pos_inf
defp safe_max(:neg_inf, x), do: x
defp safe_max(x, :neg_inf), do: x
defp safe_max(:pos_inf, _), do: :pos_inf
defp safe_max(_, :pos_inf), do: :pos_inf
defp safe_max(a, b), do: :erlang.max(a, b)
defp safe_min(:pos_inf, x), do: x
defp safe_min(x, :pos_inf), do: x
defp safe_min(:neg_inf, _), do: :neg_inf
defp safe_min(_, :neg_inf), do: :neg_inf
defp safe_min(a, b), do: :erlang.min(a, b)
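# Worked example (matching the "int > 5 AND int < 4" test below): starting from
# {:neg_inf, :pos_inf}, the `cgt 5` assumption raises the lower bound to 6 and
# the `alt 4` assumption lowers the upper bound to 3. Whichever is processed
# first, `narrow_range/2` ends up seeing a lower bound above the upper bound and
# halts with :invalid, which `check_integer_consistency/1` reports as an error.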
# --- Step 3: Recursive Consistency (Disabled but preserved) ---
defp check_recursive_consistency(assumptions, full_context) do
# 1. Gather all ambient constraints from the parent context.
ambient_constraints =
Enum.reduce(full_context, %{}, fn
# This now correctly handles all cases, not just `{var, true}`.
{var, true}, acc ->
case Info.get_traits(var) do
%{type: :list_recursive_ambient, ambient_constraint_spec: spec} ->
Map.merge(acc, %{head: spec, tail: spec})
_ ->
acc
end
_, acc ->
acc
end)
# 2. Partition assumptions into sub-problems (head, tail, tuple elements).
sub_problems =
Enum.group_by(assumptions, &Info.get_traits(elem(&1, 0))[:sub_key])
|> Map.drop([nil])
# 3. Check each sub-problem against its ambient constraint.
Enum.reduce_while(sub_problems, :ok, fn {sub_key, sub_assumptions_list}, _acc ->
ambient_spec = Map.get(ambient_constraints, sub_key)
# Re-map nested vars to base form for reconstruction
remapped_assumptions =
Map.new(sub_assumptions_list, fn {var, val} ->
# Simplified pattern match to extract the inner variable
{_cat, _pred, _idx, nested_var} = var
{nested_var, val}
end)
reconstructed_spec = TypeReconstructor.spec_from_assumptions(remapped_assumptions)
# Compile both specs to TDDs and check for subtyping.
# THIS IS THE SOURCE OF THE LOGICAL CYCLE
# Prefixed with `_` because the subtype check below is commented out.
_reconstructed_id = Compiler.spec_to_id(reconstructed_spec)
_ambient_id = Compiler.spec_to_id(ambient_spec)
# if Tdd.is_subtype(reconstructed_id, ambient_id) do
# ...
# else
# {:halt, :error}
# end
# For now, we assume it's okay to proceed.
case check(remapped_assumptions) do
:consistent -> {:cont, :ok}
:contradiction -> {:halt, :error}
end
end)
end
end
defmodule Tdd.Algo do
@moduledoc "Implements the core, stateless algorithms for TDD manipulation."
alias Tdd.Store
alias Tdd.Consistency.Engine
# --- Binary Operation: Apply ---
@spec apply(atom, (atom, atom -> atom), non_neg_integer, non_neg_integer) :: non_neg_integer
def apply(op_name, op_lambda, u1_id, u2_id) do
# Memoization wrapper
cache_key = {:apply, op_name, Enum.sort([u1_id, u2_id])}
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
result_id = do_apply(op_name, op_lambda, u1_id, u2_id)
Store.put_op_cache(cache_key, result_id)
result_id
end
end
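# Note: `op_lambda` operates on the terminal tags (:true_terminal /
# :false_terminal), not on node IDs. A union ("sum") operator, as used by the
# compiler and the tests below, looks like this:
#
#   op_sum = fn
#     :true_terminal, _ -> :true_terminal
#     _, :true_terminal -> :true_terminal
#     :false_terminal, t -> t
#     t, :false_terminal -> t
#   end
#
#   Tdd.Algo.apply(:sum, op_sum, id_a, id_b)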
defp do_apply(op_name, op_lambda, u1_id, u2_id) do
with {:ok, u1_details} <- Store.get_node(u1_id),
{:ok, u2_details} <- Store.get_node(u2_id) do
cond do
(u1_details == :true_terminal or u1_details == :false_terminal) and
(u2_details == :true_terminal or u2_details == :false_terminal) ->
if op_lambda.(u1_details, u2_details) == :true_terminal,
do: Store.true_node_id(),
else: Store.false_node_id()
u1_details == :true_terminal or u1_details == :false_terminal ->
{var2, y2, n2, d2} = u2_details
Store.find_or_create_node(
var2,
apply(op_name, op_lambda, u1_id, y2),
apply(op_name, op_lambda, u1_id, n2),
apply(op_name, op_lambda, u1_id, d2)
)
u2_details == :true_terminal or u2_details == :false_terminal ->
{var1, y1, n1, d1} = u1_details
Store.find_or_create_node(
var1,
apply(op_name, op_lambda, y1, u2_id),
apply(op_name, op_lambda, n1, u2_id),
apply(op_name, op_lambda, d1, u2_id)
)
true ->
{var1, y1, n1, d1} = u1_details
{var2, y2, n2, d2} = u2_details
top_var = Enum.min([var1, var2])
res_y =
apply(
op_name,
op_lambda,
if(var1 == top_var, do: y1, else: u1_id),
if(var2 == top_var, do: y2, else: u2_id)
)
res_n =
apply(
op_name,
op_lambda,
if(var1 == top_var, do: n1, else: u1_id),
if(var2 == top_var, do: n2, else: u2_id)
)
res_d =
apply(
op_name,
op_lambda,
if(var1 == top_var, do: d1, else: u1_id),
if(var2 == top_var, do: d2, else: u2_id)
)
Store.find_or_create_node(top_var, res_y, res_n, res_d)
end
end
end
# --- Unary Operation: Negation ---
@spec negate(non_neg_integer) :: non_neg_integer
def negate(tdd_id) do
cache_key = {:negate, tdd_id}
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
result_id =
case Store.get_node(tdd_id) do
{:ok, :true_terminal} ->
Store.false_node_id()
{:ok, :false_terminal} ->
Store.true_node_id()
{:ok, {var, y, n, d}} ->
Store.find_or_create_node(var, negate(y), negate(n), negate(d))
end
Store.put_op_cache(cache_key, result_id)
result_id
end
end
# --- Unary Operation: Semantic Simplification ---
@spec simplify(non_neg_integer(), map()) :: non_neg_integer
def simplify(tdd_id, assumptions \\ %{}) do
# Sort assumptions to ensure the cache key is canonical.
sorted_assumptions = Enum.sort(assumptions)
cache_key = {:simplify, tdd_id, sorted_assumptions}
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
result_id = do_simplify(tdd_id, assumptions)
Store.put_op_cache(cache_key, result_id)
result_id
end
end
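# Example, mirroring the tests below: simplifying any TDD under mutually
# exclusive assumptions collapses it to the FALSE terminal, because the
# Consistency.Engine reports the assumption set itself as contradictory:
#
#   assumptions = %{Tdd.Variable.v_is_atom() => true, Tdd.Variable.v_is_integer() => true}
#   Tdd.Algo.simplify(Tdd.Store.true_node_id(), assumptions)
#   # => Tdd.Store.false_node_id()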
defp do_simplify(tdd_id, assumptions) do
# 1. Check if the current path is contradictory. If so, prune this branch.
if Engine.check(assumptions) == :contradiction do
Store.false_node_id()
else
case Store.get_node(tdd_id) do
# 2. Terminal nodes are already simple.
{:ok, :true_terminal} ->
Store.true_node_id()
{:ok, :false_terminal} ->
Store.false_node_id()
# 3. Handle non-terminal nodes.
{:ok, {var, y, n, d}} ->
# 4. Check if the variable's value is already known or implied.
case Map.get(assumptions, var) do
true ->
simplify(y, assumptions)
false ->
simplify(n, assumptions)
:dc ->
simplify(d, assumptions)
nil ->
# The variable is not explicitly constrained. Check for implied constraints.
# Note: For now, the Engine only performs flat checks.
implies_true = Engine.check(Map.put(assumptions, var, false)) == :contradiction
implies_false = Engine.check(Map.put(assumptions, var, true)) == :contradiction
cond do
implies_true and implies_false ->
Store.false_node_id()
implies_true ->
simplify(y, assumptions)
implies_false ->
simplify(n, assumptions)
true ->
# No constraint, so recursively simplify all branches.
s_y = simplify(y, Map.put(assumptions, var, true))
s_n = simplify(n, Map.put(assumptions, var, false))
s_d = simplify(d, Map.put(assumptions, var, :dc))
Store.find_or_create_node(var, s_y, s_n, s_d)
end
end
end
end
end
end
defmodule Tdd.TypeReconstructor do
@moduledoc """
Reconstructs a high-level `TypeSpec` from a low-level assumption map.
This module performs the inverse operation of the TDD compiler. It takes a
set of predicate assumptions (e.g., from a path in a TDD) and synthesizes
the most specific `TypeSpec` that satisfies all of those assumptions.
"""
alias Tdd.TypeSpec
alias Tdd.Predicate.Info
alias Tdd.Variable
@doc """
Takes a map of `{variable, boolean}` assumptions and returns a `TypeSpec`.
"""
@spec spec_from_assumptions(map()) :: TypeSpec.t()
def spec_from_assumptions(assumptions) do
# 1. Partition assumptions into groups for the top-level entity and its sub-components.
partitions =
Enum.group_by(assumptions, fn {var, _val} ->
case Info.get_traits(var) do
# :head or :tail
%{type: :list_recursive, sub_key: key} -> key
# {:elem, index}
%{type: :tuple_recursive, sub_key: key} -> key
# All other predicates apply to the top-level entity
_ -> :top_level
end
end)
# 2. Reconstruct the spec for the top-level entity from its flat assumptions.
top_level_assumptions = Map.get(partitions, :top_level, []) |> Map.new()
top_level_spec = spec_from_flat_assumptions(top_level_assumptions)
# 3. Recursively reconstruct specs for all sub-problems (head, tail, elements).
sub_problem_specs =
partitions
|> Map.drop([:top_level])
|> Enum.map(fn {sub_key, sub_assumptions_list} ->
# Re-map the nested variables back to their base form for the recursive call.
# e.g., {{5, :c_head, NESTED_VAR, _}, val} -> {NESTED_VAR, val}
remapped_assumptions =
sub_assumptions_list
|> Map.new(fn {var, val} ->
# This pattern matching is a bit simplified for clarity
{_cat, _pred, nested_var_or_idx, maybe_nested_var} = var
nested_var =
if is_nil(maybe_nested_var), do: nested_var_or_idx, else: maybe_nested_var
{nested_var, val}
end)
# Recursively build the spec for the sub-problem
sub_spec = spec_from_assumptions(remapped_assumptions)
# Wrap it in a constructor that describes its relationship to the parent
case sub_key do
# Partial spec: just describes the head
:head ->
{:cons, sub_spec, :any}
# Partial spec: just describes the tail
:tail ->
{:cons, :any, sub_spec}
{:elem, index} ->
# Create a sparse tuple spec, e.g., {any, any, <sub_spec>, any}
# This is complex, a simpler approach is needed for now.
# For now, we'll just return a tuple spec that isn't fully specific.
# A full implementation would need to know the tuple's size.
# This is an oversimplification but works for demo
{:tuple, [sub_spec]}
end
end)
# 4. The final spec is the intersection of the top-level spec and all sub-problem specs.
final_spec_list = [top_level_spec | sub_problem_specs]
TypeSpec.normalize({:intersect, final_spec_list})
end
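# Illustrative reconstruction, traced through the steps above:
#
#   iex> alias Tdd.{TypeReconstructor, Variable}
#   iex> TypeReconstructor.spec_from_assumptions(%{
#   ...>   Variable.v_is_atom() => true,
#   ...>   Variable.v_atom_eq(:ok) => true
#   ...> })
#   {:literal, :ok}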
@doc "Handles only the 'flat' (non-recursive) assumptions for a single entity."
defp spec_from_flat_assumptions(assumptions) do
specs =
Enum.map(assumptions, fn {var, bool_val} ->
# Convert each assumption into a `TypeSpec`.
# A `true` assumption means the type is `X`.
# A `false` assumption means the type is `¬X`.
spec =
case var do
{0, :is_atom, _, _} -> :atom
{0, :is_integer, _, _} -> :integer
{0, :is_list, _, _} -> :list
{0, :is_tuple, _, _} -> :tuple
{1, :value, val, _} -> {:literal, val}
# For integer properties, we create a range spec. This part could be more detailed.
# x < n
{2, :alt, n, _} -> {:integer_range, :neg_inf, n - 1}
{2, :beq, n, _} -> {:literal, n}
# x > n
{2, :cgt, n, _} -> {:integer_range, n + 1, :pos_inf}
# Simplified for now
{4, :a_size, _, _} -> :tuple
{5, :b_is_empty, _, _} -> {:literal, []}
# Ignore recursive and ambient vars at this flat level
_ -> :any
end
if bool_val, do: spec, else: {:negation, spec}
end)
# The result is the intersection of all the individual specs.
TypeSpec.normalize({:intersect, specs})
end
end
defmodule Tdd.Compiler do
@moduledoc "Compiles a `TypeSpec` into a canonical TDD ID."
alias Tdd.TypeSpec
alias Tdd.Variable
alias Tdd.Store
alias Tdd.Algo
@doc "The main entry point. Takes a spec and returns its TDD ID."
@spec spec_to_id(TypeSpec.t()) :: non_neg_integer()
def spec_to_id(spec) do
# Memoization wrapper for the entire compilation process.
normalized_spec = TypeSpec.normalize(spec)
cache_key = {:spec_to_id, normalized_spec}
case Store.get_op_cache(cache_key) do
{:ok, id} ->
id
:not_found ->
id = do_spec_to_id(normalized_spec)
Store.put_op_cache(cache_key, id)
id
end
end
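# Illustrative usage (the store must be initialised in the current process first):
#
#   Tdd.Store.init()
#   id = Tdd.Compiler.spec_to_id({:union, [:atom, :integer]})
#   # The same spec modulo normalization compiles to the same ID, thanks to the
#   # normalization step and the op cache:
#   ^id = Tdd.Compiler.spec_to_id({:union, [:integer, :atom]})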
# The core compilation logic
defp do_spec_to_id(spec) do
raw_id =
case spec do
# --- Base Types ---
:any ->
Store.true_node_id()
:none ->
Store.false_node_id()
:atom ->
Store.find_or_create_node(
Variable.v_is_atom(),
Store.true_node_id(),
Store.false_node_id(),
Store.false_node_id()
)
:integer ->
Store.find_or_create_node(
Variable.v_is_integer(),
Store.true_node_id(),
Store.false_node_id(),
Store.false_node_id()
)
# Add :list, :tuple etc. here. They are simple structural TDDs.
:list ->
Store.find_or_create_node(
Variable.v_is_list(),
Store.true_node_id(),
Store.false_node_id(),
Store.false_node_id()
)
# --- Literal Types ---
{:literal, val} when is_atom(val) ->
eq_node =
Store.find_or_create_node(
Variable.v_atom_eq(val),
Store.true_node_id(),
Store.false_node_id(),
Store.false_node_id()
)
Store.find_or_create_node(
Variable.v_is_atom(),
eq_node,
Store.false_node_id(),
Store.false_node_id()
)
{:literal, val} when is_integer(val) ->
eq_node =
Store.find_or_create_node(
Variable.v_int_eq(val),
Store.true_node_id(),
Store.false_node_id(),
Store.false_node_id()
)
Store.find_or_create_node(
Variable.v_is_integer(),
eq_node,
Store.false_node_id(),
Store.false_node_id()
)
{:literal, []} ->
empty_node =
Store.find_or_create_node(
Variable.v_list_is_empty(),
Store.true_node_id(),
Store.false_node_id(),
Store.false_node_id()
)
Store.find_or_create_node(
Variable.v_is_list(),
empty_node,
Store.false_node_id(),
Store.false_node_id()
)
{:integer_range, min, max} ->
# A helper function to define the intersection operation once.
op_intersect = fn
:false_terminal, _ -> :false_terminal
_, :false_terminal -> :false_terminal
:true_terminal, t2 -> t2
t1, :true_terminal -> t1
end
# Start with the base type, `integer`.
# Note: We call spec_to_id here, which is safe because `:integer` is a base case.
base_id = spec_to_id(:integer)
# Intersect with the lower bound condition, if it exists.
id_with_min =
if min == :neg_inf do
base_id
else
# The condition is `value >= min`, which is equivalent to `NOT (value < min)`.
# The variable for `value < min` is `v_int_lt(min)`.
lt_min_tdd =
Store.find_or_create_node(
Variable.v_int_lt(min),
Store.true_node_id(),
Store.false_node_id(),
Store.false_node_id()
)
gte_min_tdd = Algo.negate(lt_min_tdd)
Algo.apply(:intersect, op_intersect, base_id, gte_min_tdd)
end
# Intersect the result with the upper bound condition, if it exists.
id_with_max =
if max == :pos_inf do
id_with_min
else
# The condition is `value <= max`, which is equivalent to `value < max + 1`.
# The variable for this is `v_int_lt(max + 1)`.
lt_max_plus_1_tdd =
Store.find_or_create_node(
Variable.v_int_lt(max + 1),
Store.true_node_id(),
Store.false_node_id(),
Store.false_node_id()
)
Algo.apply(:intersect, op_intersect, id_with_min, lt_max_plus_1_tdd)
end
# The raw TDD is now built. The final call to Algo.simplify at the end
# of do_spec_to_id will canonicalize it.
id_with_max
# Add other literals as needed
# --- Set-Theoretic Combinators ---
{:union, specs} ->
ids = Enum.map(specs, &spec_to_id/1)
Enum.reduce(ids, Store.false_node_id(), fn id, acc ->
Algo.apply(
:sum,
fn
:true_terminal, _ -> :true_terminal
_, :true_terminal -> :true_terminal
:false_terminal, t2 -> t2
t1, :false_terminal -> t1
end,
id,
acc
)
end)
{:intersect, specs} ->
ids = Enum.map(specs, &spec_to_id/1)
Enum.reduce(ids, Store.true_node_id(), fn id, acc ->
Algo.apply(
:intersect,
fn
:false_terminal, _ -> :false_terminal
_, :false_terminal -> :false_terminal
:true_terminal, t2 -> t2
t1, :true_terminal -> t1
end,
id,
acc
)
end)
{:negation, sub_spec} ->
Algo.negate(spec_to_id(sub_spec))
# --- Recursive Types (STUBS for now) ---
# These will be implemented in Step 3
{:list_of, _} ->
raise "Tdd.Compiler: :list_of not yet implemented"
{:tuple, _} ->
raise "Tdd.Compiler: {:tuple, [...]} not yet implemented"
{:cons, _, _} ->
raise "Tdd.Compiler: :cons not yet implemented"
# --- Default ---
_ ->
raise "Tdd.Compiler: Cannot compile unknown spec: #{inspect(spec)}"
end
# CRUCIAL: Every constructed TDD must be passed through simplify
# to ensure it's in its canonical, semantically-reduced form.
Algo.simplify(raw_id)
end
end
####
# Tests
####
defmodule TddStoreTests do
def test(name, expected, result) do
if expected == result do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
def run() do
IO.puts("\n--- Running Tdd.Store Tests ---")
Process.put(:test_failures, [])
# --- Test Setup ---
Tdd.Store.init()
# --- Test Cases ---
IO.puts("\n--- Section: Initialization and Terminals ---")
test("true_node_id returns 1", 1, Tdd.Store.true_node_id())
test("false_node_id returns 0", 0, Tdd.Store.false_node_id())
test("get_node for ID 1 returns true_terminal", {:ok, :true_terminal}, Tdd.Store.get_node(1))
test(
"get_node for ID 0 returns false_terminal",
{:ok, :false_terminal},
Tdd.Store.get_node(0)
)
test(
"get_node for unknown ID returns not_found",
{:error, :not_found},
Tdd.Store.get_node(99)
)
IO.puts("\n--- Section: Node Creation and Structural Sharing ---")
# Define some opaque variables
var_a = {:is_atom}
var_b = {:is_integer}
true_id = Tdd.Store.true_node_id()
false_id = Tdd.Store.false_node_id()
# Create a new node. It should get ID 2.
id1 = Tdd.Store.find_or_create_node(var_a, true_id, false_id, false_id)
test("First created node gets ID 2", 2, id1)
# Verify its content
test(
"get_node for ID 2 returns the correct tuple",
{:ok, {var_a, true_id, false_id, false_id}},
Tdd.Store.get_node(id1)
)
# Create another, different node. It should get ID 3.
id2 = Tdd.Store.find_or_create_node(var_b, id1, false_id, false_id)
test("Second created node gets ID 3", 3, id2)
# Attempt to create the first node again.
id1_again = Tdd.Store.find_or_create_node(var_a, true_id, false_id, false_id)
test(
"Attempting to create an existing node returns the same ID (Structural Sharing)",
id1,
id1_again
)
# Check that next_id was not incremented by the shared call
id3 = Tdd.Store.find_or_create_node(var_b, true_id, false_id, false_id)
test("Next new node gets the correct ID (4)", 4, id3)
IO.puts("\n--- Section: Basic Reduction Rule ---")
# Create a node where all children are the same.
id_redundant = Tdd.Store.find_or_create_node(var_a, id3, id3, id3)
test(
"A node with identical children reduces to the child's ID",
id3,
id_redundant
)
IO.puts("\n--- Section: Caching ---")
cache_key = {:my_op, 1, 2}
test("Cache is initially empty for a key", :not_found, Tdd.Store.get_op_cache(cache_key))
Tdd.Store.put_op_cache(cache_key, :my_result)
test(
"Cache returns the stored value after put",
{:ok, :my_result},
Tdd.Store.get_op_cache(cache_key)
)
Tdd.Store.put_op_cache(cache_key, :new_result)
test("Cache can be updated", {:ok, :new_result}, Tdd.Store.get_op_cache(cache_key))
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.Store tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule TypeSpecTests do
alias Tdd.TypeSpec
# Simple test helper function
defp test(name, expected, tested) do
current_failures = Process.get(:test_failures, [])
result = TypeSpec.normalize(tested)
# Use a custom comparison to handle potentially unsorted lists in expected values
# The normalize function *should* sort, but this makes tests more robust.
is_equal =
case {expected, result} do
{{:union, list1}, {:union, list2}} -> Enum.sort(list1) == Enum.sort(list2)
{{:intersect, list1}, {:intersect, list2}} -> Enum.sort(list1) == Enum.sort(list2)
_ -> expected == result
end
if is_equal do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" tested: #{inspect(tested)}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | current_failures])
end
end
def run() do
IO.puts("\n--- Running Tdd.TypeSpec.normalize/1 Tests ---")
Process.put(:test_failures, [])
# --- Test Section: Base & Simple Types ---
IO.puts("\n--- Section: Base & Simple Types ---")
test("Normalizing :any is idempotent", :any, :any)
test("Normalizing :none is idempotent", :none, :none)
test("Normalizing :atom is idempotent", :atom, :atom)
test("Normalizing a literal is idempotent", {:literal, :foo}, {:literal, :foo})
# --- Test Section: Double Negation ---
IO.puts("\n--- Section: Double Negation ---")
test("¬(¬atom) simplifies to atom", :atom, {:negation, {:negation, :atom}})
test("A single negation is preserved", {:negation, :integer}, {:negation, :integer})
test(
"¬(¬(¬atom)) simplifies to ¬atom",
{:negation, :atom},
{:negation, {:negation, {:negation, :atom}}}
)
# --- Test Section: Union Normalization ---
IO.puts("\n--- Section: Union Normalization ---")
test(
"Flattens nested unions",
{:union, [:atom, :integer, :list]},
{:union, [:integer, {:union, [:list, :atom]}]}
)
test(
"Sorts members of a union",
{:union, [:atom, :integer, :list]},
{:union, [:list, :integer, :atom]}
)
test(
"Removes duplicates in a union",
{:union, [:atom, :integer]},
{:union, [:integer, :atom, :integer]}
)
test("Simplifies a union with :none (A | none -> A)", :atom, {:union, [:atom, :none]})
test("Simplifies a union with :any (A | any -> any)", :any, {:union, [:atom, :any]})
test("An empty union simplifies to :none", :none, {:union, []})
test("A union containing only :none simplifies to :none", :none, {:union, [:none, :none]})
test("A union of a single element simplifies to the element itself", :atom, {:union, [:atom]})
# --- Test Section: Intersection Normalization ---
IO.puts("\n--- Section: Intersection Normalization ---")
test(
"Flattens nested intersections",
{:intersect, [:atom, :integer, :list]},
{:intersect, [:integer, {:intersect, [:list, :atom]}]}
)
test(
"Sorts members of an intersection",
{:intersect, [:atom, :integer, :list]},
{:intersect, [:list, :integer, :atom]}
)
test(
"Removes duplicates in an intersection",
{:intersect, [:atom, :integer]},
{:intersect, [:integer, :atom, :integer]}
)
test(
"Simplifies an intersection with :any (A & any -> A)",
:atom,
{:intersect, [:atom, :any]}
)
test(
"Simplifies an intersection with :none (A & none -> none)",
:none,
{:intersect, [:atom, :none]}
)
test("An empty intersection simplifies to :any", :any, {:intersect, []})
test(
"An intersection of a single element simplifies to the element itself",
:atom,
{:intersect, [:atom]}
)
# --- Test Section: Recursive Normalization ---
IO.puts("\n--- Section: Recursive Normalization ---")
test(
"Recursively normalizes elements in a tuple",
{:tuple, [:atom, {:union, [{:literal, :a}, {:literal, :b}]}]},
{:tuple, [{:union, [:atom]}, {:union, [{:literal, :a}, {:literal, :b}]}]}
)
test(
"Recursively normalizes head and tail in a cons",
{:cons, :any, {:negation, :integer}},
{:cons, {:union, [:atom, :any]}, {:negation, {:union, [:integer]}}}
)
test(
"Recursively normalizes element in list_of",
{:list_of, :list},
{:list_of, {:intersect, [:any, :list]}}
)
test(
"Recursively normalizes sub-spec in negation",
{:negation, {:union, [{:literal, :a}, {:literal, :b}]}},
{:negation, {:union, [{:literal, :a}, {:literal, :b}]}}
)
# --- Test Section: Complex Nested Cases ---
IO.puts("\n--- Section: Complex Nested Cases ---")
complex_spec =
{:union,
[
:atom,
# simplifies to :integer
{:intersect, [:any, :integer, {:intersect, [:integer]}]},
# simplifies to :list
{:union, [:none, :list]}
]}
test(
"Handles complex nested simplifications correctly",
{:union, [:atom, :integer, :list]},
complex_spec
)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All TypeSpec tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures:")
Enum.each(failures, &IO.puts(" - #{&1}"))
end
end
end
defmodule TddVariableTests do
alias Tdd.Variable
alias Tdd.TypeSpec
# Simple test helper function
defp test(name, expected, result) do
current_failures = Process.get(:test_failures, [])
if expected == result do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | current_failures])
end
end
def run() do
IO.puts("\n--- Running Tdd.Variable Tests ---")
Process.put(:test_failures, [])
# --- Test Section: Variable Structure ---
IO.puts("\n--- Section: Variable Structure ---")
test("v_is_atom returns correct tuple", {0, :is_atom, nil, nil}, Variable.v_is_atom())
test("v_atom_eq returns correct tuple", {1, :value, :foo, nil}, Variable.v_atom_eq(:foo))
test("v_int_lt returns correct tuple", {2, :alt, 10, nil}, Variable.v_int_lt(10))
test(
"v_tuple_size_eq returns correct tuple",
{4, :a_size, 2, nil},
Variable.v_tuple_size_eq(2)
)
test(
"v_tuple_elem_pred nests a variable correctly",
{4, :b_element, 0, {0, :is_integer, nil, nil}},
Variable.v_tuple_elem_pred(0, Variable.v_is_integer())
)
test(
"v_list_is_empty returns correct tuple",
{5, :b_is_empty, nil, nil},
Variable.v_list_is_empty()
)
test(
"v_list_head_pred nests a variable correctly",
{5, :c_head, {0, :is_atom, nil, nil}, nil},
Variable.v_list_head_pred(Variable.v_is_atom())
)
test(
"v_list_all_elements_are nests a TypeSpec correctly",
{5, :a_all_elements, {:union, [:atom, :integer]}, nil},
Variable.v_list_all_elements_are(TypeSpec.normalize({:union, [:integer, :atom]}))
)
# --- Test Section: Global Ordering ---
IO.puts("\n--- Section: Global Ordering (Based on Elixir Term Comparison) ---")
# Category 0 < Category 1
test(
"Primary type var < Atom property var",
true,
Variable.v_is_tuple() < Variable.v_atom_eq(:anything)
)
# Within Category 2: alt < beq < cgt
test(
"Integer :lt var < Integer :eq var",
true,
Variable.v_int_lt(10) < Variable.v_int_eq(10)
)
test(
"Integer :eq var < Integer :gt var",
true,
Variable.v_int_eq(10) < Variable.v_int_gt(10)
)
# Within Category 2: comparison of value
test(
"Integer :eq(5) var < Integer :eq(10) var",
true,
Variable.v_int_eq(5) < Variable.v_int_eq(10)
)
# Within Category 4: comparison of index
test(
"Tuple elem(0) var < Tuple elem(1) var",
true,
Variable.v_tuple_elem_pred(0, Variable.v_is_atom()) <
Variable.v_tuple_elem_pred(1, Variable.v_is_atom())
)
# Within Category 4, same index: comparison of nested var
test(
"Tuple elem(0, atom) var < Tuple elem(0, int) var",
true,
Variable.v_tuple_elem_pred(0, Variable.v_is_atom()) <
Variable.v_tuple_elem_pred(0, Variable.v_is_integer())
)
test(
"List :a_all_elements var < List :b_is_empty var",
true,
Variable.v_list_all_elements_are(:atom) < Variable.v_list_is_empty()
)
test(
"List :b_is_empty var < List :c_head var",
true,
Variable.v_list_is_empty() < Variable.v_list_head_pred(Variable.v_is_atom())
)
test(
"List :c_head var < List :tail var",
true,
Variable.v_list_head_pred(Variable.v_is_atom()) <
Variable.v_list_tail_pred(Variable.v_is_atom())
)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.Variable tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule ConsistencyEngineTests do
alias Tdd.Consistency.Engine
alias Tdd.Variable
defp test(name, expected, assumptions_map) do
result = Engine.check(assumptions_map)
# ... test reporting logic ...
is_ok = expected == result
status = if is_ok, do: "[PASS]", else: "[FAIL]"
IO.puts("#{status} #{name}")
unless is_ok do
IO.puts(" Expected: #{inspect(expected)}, Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
def run() do
IO.puts("\n--- Running Tdd.Consistency.Engine Tests ---")
Process.put(:test_failures, [])
# --- Section: Basic & Implication Tests ---
IO.puts("\n--- Section: Basic & Implication Tests ---")
test("An empty assumption map is consistent", :consistent, %{})
test("A single valid assumption is consistent", :consistent, %{Variable.v_is_atom() => true})
test(
"An implied contradiction is caught by expander",
:contradiction,
%{Variable.v_atom_eq(:foo) => true, Variable.v_is_atom() => false}
)
test(
"Implication creates a consistent set",
:consistent,
# implies is_atom=true
%{Variable.v_atom_eq(:foo) => true}
)
# --- Section: Primary Type Exclusivity ---
IO.puts("\n--- Section: Primary Type Exclusivity ---")
test(
"Two primary types cannot both be true",
:contradiction,
%{Variable.v_is_atom() => true, Variable.v_is_integer() => true}
)
test(
"Two primary types implied to be true is a contradiction",
:contradiction,
%{Variable.v_atom_eq(:foo) => true, Variable.v_int_eq(5) => true}
)
test(
"One primary type true and another false is consistent",
:consistent,
%{Variable.v_is_atom() => true, Variable.v_is_integer() => false}
)
# --- Section: Atom Consistency ---
IO.puts("\n--- Section: Atom Consistency ---")
test(
"An atom cannot equal two different values",
:contradiction,
%{Variable.v_atom_eq(:foo) => true, Variable.v_atom_eq(:bar) => true}
)
test(
"An atom can equal one value",
:consistent,
%{Variable.v_atom_eq(:foo) => true}
)
# --- Section: List Flat Consistency ---
IO.puts("\n--- Section: List Flat Consistency ---")
test(
"A list cannot be empty and have a head property",
:contradiction,
%{
Variable.v_list_is_empty() => true,
Variable.v_list_head_pred(Variable.v_is_atom()) => true
}
)
test(
"A non-empty list can have a head property",
:consistent,
%{
Variable.v_list_is_empty() => false,
Variable.v_list_head_pred(Variable.v_is_atom()) => true
}
)
test(
"A non-empty list is implied by head property",
:consistent,
# implies is_empty=false
%{Variable.v_list_head_pred(Variable.v_is_atom()) => true}
)
# --- Section: Integer Consistency ---
IO.puts("\n--- Section: Integer Consistency ---")
test("int == 5 is consistent", :consistent, %{Variable.v_int_eq(5) => true})
test("int == 5 AND int == 10 is a contradiction", :contradiction, %{
Variable.v_int_eq(5) => true,
Variable.v_int_eq(10) => true
})
test("int < 10 AND int > 20 is a contradiction", :contradiction, %{
Variable.v_int_lt(10) => true,
Variable.v_int_gt(20) => true
})
test("int > 5 AND int < 4 is a contradiction", :contradiction, %{
Variable.v_int_gt(5) => true,
Variable.v_int_lt(4) => true
})
# 6
test("int > 5 AND int < 7 is consistent", :consistent, %{
Variable.v_int_gt(5) => true,
Variable.v_int_lt(7) => true
})
test("int == 5 AND int < 3 is a contradiction", :contradiction, %{
Variable.v_int_eq(5) => true,
Variable.v_int_lt(3) => true
})
test("int == 5 AND int > 10 is a contradiction", :contradiction, %{
Variable.v_int_eq(5) => true,
Variable.v_int_gt(10) => true
})
test("int == 5 AND int > 3 is consistent", :consistent, %{
Variable.v_int_eq(5) => true,
Variable.v_int_gt(3) => true
})
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Consistency.Engine tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule TddAlgoTests do
alias Tdd.Store
alias Tdd.Variable
alias Tdd.Algo
alias Tdd.TypeSpec # We need this to create stable variables
# --- Test Helper ---
defp test(name, expected, result) do
# A simple equality test is sufficient here.
if expected == result do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
# Helper to pretty print a TDD for debugging
defp print_tdd(id, indent \\ 0) do
prefix = String.duplicate(" ", indent)
case Store.get_node(id) do
{:ok, details} ->
IO.puts("#{prefix}ID #{id}: #{inspect(details)}")
case details do
{_var, y, n, d} ->
IO.puts("#{prefix} Yes ->"); print_tdd(y, indent + 1)
IO.puts("#{prefix} No ->"); print_tdd(n, indent + 1)
IO.puts("#{prefix} DC ->"); print_tdd(d, indent + 1)
_ -> :ok
end
{:error, reason} ->
IO.puts("#{prefix}ID #{id}: Error - #{reason}")
end
end
# --- Test Runner ---
def run() do
IO.puts("\n--- Running Tdd.Algo & Tdd.Consistency.Engine Tests ---")
Process.put(:test_failures, [])
# Setup: Initialize the store and define some basic TDDs using the new modules.
Store.init()
true_id = Store.true_node_id()
false_id = Store.false_node_id()
# --- Manually build some basic type TDDs for testing ---
# t_atom = if is_atom then true else false
t_atom = Store.find_or_create_node(Variable.v_is_atom(), true_id, false_id, false_id)
# t_int = if is_int then true else false
t_int = Store.find_or_create_node(Variable.v_is_integer(), true_id, false_id, false_id)
# t_foo = if is_atom then (if value == :foo then true else false) else false
foo_val_check = Store.find_or_create_node(Variable.v_atom_eq(:foo), true_id, false_id, false_id)
t_foo = Store.find_or_create_node(Variable.v_is_atom(), foo_val_check, false_id, false_id)
# t_bar = if is_atom then (if value == :bar then true else false) else false
bar_val_check = Store.find_or_create_node(Variable.v_atom_eq(:bar), true_id, false_id, false_id)
t_bar = Store.find_or_create_node(Variable.v_is_atom(), bar_val_check, false_id, false_id)
# --- Section: Negate Algorithm ---
IO.puts("\n--- Section: Algo.negate ---")
negated_true = Algo.negate(true_id)
test("negate(true) is false", false_id, negated_true)
negated_false = Algo.negate(false_id)
test("negate(false) is true", true_id, negated_false)
# Double negation should be identity
test("negate(negate(t_atom)) is t_atom", t_atom, Algo.negate(Algo.negate(t_atom)))
# --- Section: Apply Algorithm (Union & Intersection) ---
IO.puts("\n--- Section: Algo.apply (raw structural operations) ---")
op_sum = fn
:true_terminal, _ -> :true_terminal
_, :true_terminal -> :true_terminal
t, :false_terminal -> t
:false_terminal, t -> t
end
op_intersect = fn
:false_terminal, _ -> :false_terminal
_, :false_terminal -> :false_terminal
t, :true_terminal -> t
:true_terminal, t -> t
end
# atom | int
sum_atom_int = Algo.apply(:sum, op_sum, t_atom, t_int)
# The result should be a node that checks is_atom, then if false, checks is_int
# We expect a structure like: if is_atom -> true, else -> t_int
expected_sum_structure_id = Store.find_or_create_node(Variable.v_is_atom(), true_id, t_int, t_int)
test("Structure of 'atom | int' is correct", expected_sum_structure_id, sum_atom_int)
# :foo & :bar (structurally, before simplification)
# It should build a tree that checks is_atom, then value==:foo, then value==:bar
# This will be complex, but the key is that it's NOT the false_id yet.
intersect_foo_bar_raw = Algo.apply(:intersect, op_intersect, t_foo, t_bar)
test(":foo & :bar (raw) is not the false node", false, intersect_foo_bar_raw == false_id)
# --- Section: Simplify Algorithm (Flat Types) ---
IO.puts("\n--- Section: Algo.simplify (with Consistency.Engine) ---")
# An impossible structure: node that requires a value to be an atom AND an integer
# This tests the `check_primary_exclusivity` rule.
contradictory_assumptions = %{Variable.v_is_atom() => true, Variable.v_is_integer() => true}
# Simplifying ANYTHING under contradictory assumptions should yield `false`.
simplified_under_contradiction = Algo.simplify(true_id, contradictory_assumptions)
test("Simplifying under contradictory assumptions (atom & int) results in false", false_id, simplified_under_contradiction)
# Test implication: A property implies its primary type
# A value being `:foo` implies it is an atom.
assumptions_with_foo = %{Variable.v_atom_eq(:foo) => true}
# If we simplify t_int under this assumption, it should become false.
# The engine expands to `{is_atom: true, value==:foo: true}`. Then it sees that
# the t_int node's variable `is_integer` must be false (from exclusivity rule).
simplified_int_given_foo = Algo.simplify(t_int, assumptions_with_foo)
test("Simplifying 'integer' given 'value==:foo' results in false", false_id, simplified_int_given_foo)
# Now, let's simplify the raw intersection of :foo and :bar.
# The simplify algorithm should eventually discover that an atom cannot be both
# :foo and :bar at once; that requires `check_atom_consistency`, which is still a stub.
# Until then we only exercise the plumbing, so the result is intentionally unused.
_simplified_foo_bar = Algo.simplify(intersect_foo_bar_raw, %{})
# In the meantime, test a simpler contradiction that *is* implemented.
intersect_atom_int_raw = Algo.apply(:intersect, op_intersect, t_atom, t_int)
simplified_atom_int = Algo.simplify(intersect_atom_int_raw, %{})
test("Simplifying 'atom & int' results in false", false_id, simplified_atom_int)
# Test path collapsing
# If we simplify 'atom | int' under the assumption 'is_atom == true', it should become `true`.
simplified_sum_given_atom = Algo.simplify(sum_atom_int, %{Variable.v_is_atom() => true})
test("Simplifying 'atom | int' given 'is_atom==true' results in true", true_id, simplified_sum_given_atom)
# If we simplify 'atom | int' under the assumption 'is_atom == false', it should become `t_int`.
simplified_sum_given_not_atom = Algo.simplify(sum_atom_int, %{Variable.v_is_atom() => false})
test("Simplifying 'atom | int' given 'is_atom==false' results in 'integer'", t_int, simplified_sum_given_not_atom)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.Algo tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
# Optional: print details of failed tests if needed
end
end
end
defmodule TypeReconstructorTests do
alias Tdd.TypeReconstructor
alias Tdd.Variable
alias Tdd.TypeSpec
defp test(name, expected_spec, assumptions) do
# Normalize both expected and result for a canonical comparison
expected = TypeSpec.normalize(expected_spec)
result = TypeSpec.normalize(TypeReconstructor.spec_from_assumptions(assumptions))
is_ok = expected == result
status = if is_ok, do: "[PASS]", else: "[FAIL]"
IO.puts("#{status} #{name}")
unless is_ok do
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
def run() do
IO.puts("\n--- Running Tdd.TypeReconstructor Tests ---")
Process.put(:test_failures, [])
# --- Section: Basic Flat Reconstructions ---
IO.puts("\n--- Section: Basic Flat Reconstructions ---")
test("is_atom=true -> atom", :atom, %{Variable.v_is_atom() => true})
test("is_atom=false -> ¬atom", {:negation, :atom}, %{Variable.v_is_atom() => false})
test(
"is_atom=true AND value==:foo -> :foo",
{:literal, :foo},
%{Variable.v_is_atom() => true, Variable.v_atom_eq(:foo) => true}
)
test(
"is_atom=true AND value!=:foo -> atom & ¬:foo",
{:intersect, [:atom, {:negation, {:literal, :foo}}]},
%{Variable.v_is_atom() => true, Variable.v_atom_eq(:foo) => false}
)
test(
"is_integer=true AND int==5 -> 5",
{:literal, 5},
%{Variable.v_is_integer() => true, Variable.v_int_eq(5) => true}
)
test(
"is_list=true AND is_empty=true -> []",
{:literal, []},
%{Variable.v_is_list() => true, Variable.v_list_is_empty() => true}
)
# --- Section: Combined Flat Reconstructions ---
IO.puts("\n--- Section: Combined Flat Reconstructions ---")
test(
"int > 10 AND int < 20",
# This is complex. Our simple reconstructor produces two separate one-sided ranges.
# A more advanced one would combine them into a single {:integer_range, 11, 19}
# (see the sketch after this test). For now, we test the current behavior.
{:intersect,
[
:integer,
{:integer_range, 11, :pos_inf},
{:integer_range, :neg_inf, 19}
]},
%{Variable.v_int_gt(10) => true, Variable.v_int_lt(20) => true}
)
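# A minimal sketch (hypothetical helper, not part of TypeReconstructor) of how the
# two one-sided ranges above could be folded into the single canonical range
# {:integer_range, 11, 19}. It only assumes the {:integer_range, min, max} shape
# used in the expected spec.
combine_ranges = fn specs ->
  Enum.reduce(specs, {:neg_inf, :pos_inf}, fn
    {:integer_range, min, max}, {acc_min, acc_max} ->
      # Intersection keeps the tightest lower and upper bounds seen so far.
      new_min = if acc_min == :neg_inf or (is_integer(min) and min > acc_min), do: min, else: acc_min
      new_max = if acc_max == :pos_inf or (is_integer(max) and max < acc_max), do: max, else: acc_max
      {new_min, new_max}

    _other, acc ->
      acc
  end)
end
# Acts as an inline assertion: raises a MatchError if the fold is wrong.
{11, 19} = combine_ranges.([{:integer_range, 11, :pos_inf}, {:integer_range, :neg_inf, 19}])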
# --- Section: Recursive Reconstructions (Simplified) ---
IO.puts("\n--- Section: Recursive Reconstructions ---")
# This tests the partitioning and recursive call logic.
# Because the recursive cases are only partially implemented, these tests stay deliberately simple.
test(
"head is an atom",
{:intersect, [:list, {:cons, :atom, :any}]},
# Assumption for `is_list=true` is implied by `v_list_head_pred`
%{Variable.v_list_head_pred(Variable.v_is_atom()) => true}
)
# Note: The recursive tests are limited by the simplifications made in the
# implementation (e.g., tuple reconstruction). A full implementation would
# require more context (like tuple size) to be passed down.
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All TypeReconstructor tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule CompilerAlgoTests do
alias Tdd.Compiler
alias Tdd.Store
alias Tdd.Algo
# High-level helpers that mimic the final API
defp is_subtype(spec1, spec2) do
id1 = Compiler.spec_to_id(spec1)
id2 = Compiler.spec_to_id(spec2)
# The subtyping check is: `A <: B` if and only if `A & ~B` is empty (`:none`).
neg_id2 = Algo.negate(id2)
op_intersect = fn
:false_terminal, _ -> :false_terminal
_, :false_terminal -> :false_terminal
t, :true_terminal -> t
:true_terminal, t -> t
# Default case for non-terminal nodes, though apply handles recursion
_t1, _t2 -> :non_terminal
end
intersect_id = Algo.apply(:intersect, op_intersect, id1, neg_id2)
final_id = Algo.simplify(intersect_id, %{})
final_id == Store.false_node_id()
end
defp are_equivalent(spec1, spec2) do
Compiler.spec_to_id(spec1) == Compiler.spec_to_id(spec2)
end
defp is_contradiction(spec) do
Compiler.spec_to_id(spec) == Store.false_node_id()
end
defp test_subtype(name, expected, s1, s2), do: test(name, expected, is_subtype(s1, s2))
defp test_equiv(name, expected, s1, s2), do: test(name, expected, are_equivalent(s1, s2))
# Returns a one-argument checker, so call sites read: test_contradiction("name").(spec)
defp test_contradiction(name, expected \\ true), do: &test(name, expected, is_contradiction(&1))
defp test(name, exp, res) do
is_ok = exp == res
status = if is_ok, do: "[PASS]", else: "[FAIL]"
IO.puts("#{status} #{name}")
unless is_ok do
IO.puts(" Expected: #{inspect(exp)}")
IO.puts(" Got: #{inspect(res)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
def run() do
IO.puts("\n--- Running Compiler & Algo Integration Tests ---")
Process.put(:test_failures, [])
# Setup
Tdd.Store.init()
# --- Section: Basic Compilation & Equivalence ---
IO.puts("\n--- Section: Basic Equivalences ---")
test_equiv("atom & any == atom", true, {:intersect, [:atom, :any]}, :atom)
test_equiv("atom | none == atom", true, {:union, [:atom, :none]}, :atom)
test_equiv("atom & int == none", true, {:intersect, [:atom, :integer]}, :none)
test_equiv("¬(¬atom) == atom", true, {:negation, {:negation, :atom}}, :atom)
test_equiv("atom | atom == atom", true, {:union, [:atom, :atom]}, :atom)
# --- Section: Basic Subtyping ---
IO.puts("\n--- Section: Basic Subtyping ---")
test_subtype(":foo <: atom", true, {:literal, :foo}, :atom)
test_subtype("atom <: :foo", false, :atom, {:literal, :foo})
test_subtype(":foo <: integer", false, {:literal, :foo}, :integer)
test_subtype("int==5 <: integer", true, {:literal, 5}, :integer)
test_subtype("none <: atom", true, :none, :atom)
test_subtype("atom <: any", true, :atom, :any)
# --- Section: Integer Range Logic ---
IO.puts("\n--- Section: Integer Range Logic ---")
int_5_to_10 = {:integer_range, 5, 10}
int_7_to_8 = {:integer_range, 7, 8}
int_15_to_20 = {:integer_range, 15, 20}
int_0_to_100 = {:integer_range, 0, 100}
test_subtype("range(7..8) <: range(5..10)", true, int_7_to_8, int_5_to_10)
test_subtype("range(5..10) <: range(7..8)", false, int_5_to_10, int_7_to_8)
test_subtype("range(5..10) <: range(15..20)", false, int_5_to_10, int_15_to_20)
test_equiv(
"range(5..10) & range(7..8) == range(7..8)",
true,
{:intersect, [int_5_to_10, int_7_to_8]},
int_7_to_8
)
test_equiv(
"range(5..10) & range(0..100) == range(5..10)",
true,
{:intersect, [int_5_to_10, int_0_to_100]},
int_5_to_10
)
test_equiv(
"range(5..10) | range(7..8) == range(5..10)",
true,
{:union, [int_5_to_10, int_7_to_8]},
int_5_to_10
)
# --- Section: Contradictions & Simplifications ---
IO.puts("\n--- Section: Contradictions & Simplifications ---")
test_contradiction("atom & integer").({:intersect, [:atom, :integer]})
test_contradiction(":foo & :bar").({:intersect, [{:literal, :foo}, {:literal, :bar}]})
test_contradiction("atom & (int==5)").({:intersect, [:atom, {:literal, 5}]})
test_contradiction("range(5..10) & range(15..20)").({:intersect, [int_5_to_10, int_15_to_20]})
test_contradiction("integer & ¬integer").({:intersect, [:integer, {:negation, :integer}]})
# --- Section: Subtype Reduction in Normalization ---
IO.puts("\n--- Section: Subtype Reduction Logic ---")
test_equiv(
"(:foo | :bar | atom) simplifies to atom",
true,
{:union, [{:literal, :foo}, {:literal, :bar}, :atom]},
:atom
)
test_equiv(
"(range(5..10) | integer) simplifies to integer",
true,
{:union, [int_5_to_10, :integer]},
:integer
)
test_equiv(
"(:foo & atom) simplifies to :foo",
true,
{:intersect, [{:literal, :foo}, :atom]},
{:literal, :foo}
)
test_equiv(
"(range(5..10) & integer) simplifies to range(5..10)",
true,
{:intersect, [int_5_to_10, :integer]},
int_5_to_10
)
# --- Section: Logical Laws (Distribution and De Morgan's) ---
IO.puts("\n--- Section: Logical Laws ---")
# De Morgan's Law: ¬(A | B) == (¬A & ¬B)
spec_not_a_or_b = {:negation, {:union, [:atom, :integer]}}
spec_not_a_and_not_b = {:intersect, [{:negation, :atom}, {:negation, :integer}]}
test_equiv(
"De Morgan's (¬(A|B) == ¬A & ¬B) holds",
true,
spec_not_a_or_b,
spec_not_a_and_not_b
)
# De Morgan's Law: ¬(A & B) == (¬A | ¬B)
spec_not_a_and_b = {:negation, {:intersect, [{:literal, :foo}, int_5_to_10]}}
spec_not_a_or_not_b = {:union, [{:negation, {:literal, :foo}}, {:negation, int_5_to_10}]}
test_equiv(
"De Morgan's (¬(A&B) == ¬A | ¬B) holds",
true,
spec_not_a_and_b,
spec_not_a_or_not_b
)
# Distributive Law: A & (B | C) == (A & B) | (A & C)
spec_a = :integer
spec_b = {:integer_range, 0, 10}
spec_c = {:integer_range, 20, 30}
spec_dist_lhs = {:intersect, [spec_a, {:union, [spec_b, spec_c]}]}
spec_dist_rhs = {:union, [{:intersect, [spec_a, spec_b]}, {:intersect, [spec_a, spec_c]}]}
test_equiv(
"Distributive Law (A & (B|C)) holds",
true,
spec_dist_lhs,
spec_dist_rhs
)
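# Absorption laws, added as hedged extra checks on the same pipeline:
# A | (A & B) == A and A & (A | B) == A.
spec_abs_lhs1 = {:union, [spec_a, {:intersect, [spec_a, spec_b]}]}
spec_abs_lhs2 = {:intersect, [spec_a, {:union, [spec_a, spec_b]}]}
test_equiv("Absorption Law (A | (A & B) == A) holds", true, spec_abs_lhs1, spec_a)
test_equiv("Absorption Law (A & (A | B) == A) holds", true, spec_abs_lhs2, spec_a)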
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Compiler & Algo Integration tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
TypeSpecTests.run()
TddStoreTests.run()
TddVariableTests.run()
TddAlgoTests.run()
ConsistencyEngineTests.run()
TypeReconstructorTests.run()
CompilerAlgoTests.run()