# elipl/new.exs
Code.require_file("./debug.exs")
defmodule Tdd.TypeSpec do
@moduledoc """
Defines the `TypeSpec` structure and functions for its manipulation.
Normalization includes alpha-conversion, beta-reduction, and a final
canonical renaming pass for bound variables.
"""
# --- Core Types ---
@type t ::
:any
| :none
| :atom
| :integer
| :list
| :tuple
| {:literal, term()}
| {:union, [t()]}
| {:intersect, [t()]}
| {:negation, t()}
| {:tuple, [t()]}
| {:cons, head :: t(), tail :: t()}
| {:list_of, element :: t()}
| {:integer_range, min :: integer() | :neg_inf, max :: integer() | :pos_inf}
| {:type_var, name :: atom()}
| {:mu, type_variable_name :: atom(), body :: t()}
| {:type_lambda, param_names :: [atom()], body :: t()}
| {:type_apply, constructor_spec :: t(), arg_specs :: [t()]}
@doc """
Converts a `TypeSpec` into its canonical (normalized) form.
Performs structural normalization, alpha-conversion, beta-reduction,
and a final canonical renaming pass for all bound variables.
"""
@spec normalize(t()) :: t()
def normalize(spec) do
{intermediate_normalized, _counter_after_pass1} = normalize_pass1(spec, %{}, 0)
{final_spec_before_subtype_redux, _mu_counter, _lambda_counter} =
canonical_rename_pass(intermediate_normalized, %{}, 0, 0)
apply_subtype_reduction(final_spec_before_subtype_redux)
end
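# Illustrative results of `normalize/1` (informal examples, not doctests):
#
#   normalize({:union, [{:literal, 1}, :integer, :none]})
#   #=> :integer            # the literal is subsumed and :none is dropped
#
#   normalize({:negation, {:negation, :atom}})
#   #=> :atom               # double negation is eliminated
#
#   normalize({:type_apply, {:type_lambda, [:x], {:type_var, :x}}, [:integer]})
#   #=> :integer            # beta-reduction of the identity constructor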
# Final pass for subtype-based reductions on fully canonical specs
defp apply_subtype_reduction(spec) do
case spec do
{:union, members} ->
recursively_reduced_members = Enum.map(members, &apply_subtype_reduction/1)
flattened_members =
Enum.flat_map(recursively_reduced_members, fn
{:union, sub_members} -> sub_members
m -> [m]
end)
unique_no_none =
flattened_members
|> Enum.reject(&(&1 == :none))
|> Enum.uniq()
if Enum.member?(unique_no_none, :any) do
:any
else
# Pass `true` for already_normalized flag to is_subtype?
final_members =
Enum.reject(unique_no_none, fn member_to_check ->
Enum.any?(unique_no_none, fn other_member ->
member_to_check != other_member and
is_subtype?(member_to_check, other_member, true)
end)
end)
case Enum.sort(final_members) do
[] -> :none
[single] -> single
list_members -> {:union, list_members}
end
end
{:intersect, members} ->
recursively_reduced_members = Enum.map(members, &apply_subtype_reduction/1)
expanded_flattened_members =
Enum.flat_map(recursively_reduced_members, fn
{:intersect, sub_members} -> sub_members
# get_supertypes expects normalized spec, and its output is also normalized
# Pass flag
m -> get_supertypes(m, true)
end)
unique_no_any =
expanded_flattened_members
|> Enum.reject(&(&1 == :any))
|> Enum.uniq()
if Enum.member?(unique_no_any, :none) do
:none
else
# Pass `true` for already_normalized flag to is_subtype?
final_members =
Enum.reject(unique_no_any, fn member_to_check ->
Enum.any?(unique_no_any, fn other_member ->
member_to_check != other_member and
is_subtype?(other_member, member_to_check, true)
end)
end)
case Enum.sort(final_members) do
[] -> :any
[single] -> single
list_members -> {:intersect, list_members}
end
end
{:negation, body} ->
{:negation, apply_subtype_reduction(body)}
{:tuple, elements} ->
{:tuple, Enum.map(elements, &apply_subtype_reduction/1)}
{:cons, head, tail} ->
{:cons, apply_subtype_reduction(head), apply_subtype_reduction(tail)}
{:mu, var_name, body} ->
{:mu, var_name, apply_subtype_reduction(body)}
{:type_lambda, params, body} ->
{:type_lambda, params, apply_subtype_reduction(body)}
{:type_apply, constructor, args} ->
{:type_apply, apply_subtype_reduction(constructor),
Enum.map(args, &apply_subtype_reduction/1)}
atomic_or_literal ->
atomic_or_literal
end
end
# ------------------------------------------------------------------
# Pass 1: Structural Normalization, Beta-Reduction, Initial Alpha-Conversion
# Returns: {normalized_spec, next_counter}
# ------------------------------------------------------------------
defp normalize_pass1(spec, env, counter) do
res_tuple =
case spec do
s when is_atom(s) and s in [:any, :none, :atom, :integer, :list, :tuple] ->
{s, counter}
{:literal, _val} = lit_spec ->
{lit_spec, counter}
{:type_var, name} ->
{Map.get(env, name, spec), counter}
{:negation, sub_spec} ->
normalize_negation_pass1(sub_spec, env, counter)
{:tuple, elements} ->
{normalized_elements, next_counter_after_elements} =
map_fold_counter_for_pass1(elements, env, counter, &normalize_pass1/3)
{{:tuple, normalized_elements}, next_counter_after_elements}
{:cons, head, tail} ->
{normalized_head, counter_after_head} = normalize_pass1(head, env, counter)
{normalized_tail, counter_after_tail} = normalize_pass1(tail, env, counter_after_head)
{{:cons, normalized_head, normalized_tail}, counter_after_tail}
{:integer_range, min, max} ->
range_spec =
if is_integer(min) and is_integer(max) and min > max do
:none
else
{:integer_range, min, max}
end
{range_spec, counter}
{:union, members} ->
normalize_union_pass1(members, env, counter)
{:intersect, members} ->
normalize_intersection_pass1(members, env, counter)
# `list_of(E)` is canonicalized here: it is rewritten to an equivalent
# `mu` expression, which the recursive `normalize_pass1` call below then
# normalizes.
{:list_of, element_spec} ->
# First, normalize the element's spec.
{normalized_element, counter_after_element} =
normalize_pass1(element_spec, env, counter)
# Create a *temporary, non-canonical* name for the recursive variable.
# The subsequent `normalize_pass1` call on the `mu` form will perform
# the proper, canonical renaming.
temp_rec_var = :"$list_of_rec_var"
list_body =
{:union,
[
{:literal, []},
{:cons, normalized_element, {:type_var, temp_rec_var}}
]}
# Now, normalize the full mu-expression. This is the crucial step.
# It will handle alpha-conversion of `temp_rec_var` and normalization
# of the body's components.
normalize_pass1({:mu, temp_rec_var, list_body}, env, counter_after_element)
{:mu, var_name, body} ->
# Create a fresh canonical name and add it to the environment used to
# normalize the body.
fresh_temp_name = fresh_var_name(:p1_m_var, counter)
body_env = Map.put(env, var_name, {:type_var, fresh_temp_name})
{normalized_body, next_counter_after_body} =
normalize_pass1(body, body_env, counter + 1)
{{:mu, fresh_temp_name, normalized_body}, next_counter_after_body}
{:type_lambda, param_names, body} ->
{reversed_fresh_temp_names, next_counter_after_params, body_env} =
Enum.reduce(param_names, {[], counter, env}, fn param_name,
{acc_fresh_names, cnt, current_env} ->
fresh_name = fresh_var_name(:p1_lambda_var, cnt)
{[fresh_name | acc_fresh_names], cnt + 1,
Map.put(current_env, param_name, {:type_var, fresh_name})}
end)
fresh_temp_param_names = Enum.reverse(reversed_fresh_temp_names)
{normalized_body, final_counter} =
normalize_pass1(body, body_env, next_counter_after_params)
{{:type_lambda, fresh_temp_param_names, normalized_body}, final_counter}
{:type_apply, constructor_spec, arg_specs} ->
{normalized_constructor, counter_after_constructor} =
normalize_pass1(constructor_spec, env, counter)
{normalized_arg_specs, counter_after_args} =
map_fold_counter_for_pass1(
arg_specs,
env,
counter_after_constructor,
&normalize_pass1/3
)
case normalized_constructor do
{:type_lambda, pass1_formal_params, pass1_lambda_body} ->
if length(pass1_formal_params) != length(normalized_arg_specs) do
raise "TypeSpec.normalize_pass1: Arity mismatch in application. Expected #{length(pass1_formal_params)} args, got #{length(normalized_arg_specs)}. Lambda: #{inspect(normalized_constructor)}, Args: #{inspect(normalized_arg_specs)}"
else
substitution_map = Map.new(Enum.zip(pass1_formal_params, normalized_arg_specs))
substituted_body =
substitute_vars_pass1(pass1_lambda_body, substitution_map, MapSet.new())
normalize_pass1(substituted_body, env, counter_after_args)
end
_other_constructor ->
{{:type_apply, normalized_constructor, normalized_arg_specs}, counter_after_args}
end
other_spec ->
raise "TypeSpec.normalize_pass1: Unhandled spec form: #{inspect(other_spec)}"
end
res_tuple
end
defp map_fold_counter_for_pass1(list, env, initial_counter, fun) do
Enum.map_reduce(list, initial_counter, fn item, acc_counter ->
fun.(item, env, acc_counter)
end)
end
defp substitute_vars_pass1(spec, substitutions, bound_in_scope) do
case spec do
{:type_var, name} ->
if MapSet.member?(bound_in_scope, name) do
spec
else
Map.get(substitutions, name, spec)
end
{:mu, var_name, body} ->
newly_bound_scope = MapSet.put(bound_in_scope, var_name)
active_substitutions = Map.delete(substitutions, var_name)
{:mu, var_name, substitute_vars_pass1(body, active_substitutions, newly_bound_scope)}
{:type_lambda, param_names, body} ->
newly_bound_scope = Enum.reduce(param_names, bound_in_scope, &MapSet.put(&2, &1))
active_substitutions = Enum.reduce(param_names, substitutions, &Map.delete(&2, &1))
{:type_lambda, param_names,
substitute_vars_pass1(body, active_substitutions, newly_bound_scope)}
{:negation, sub} ->
{:negation, substitute_vars_pass1(sub, substitutions, bound_in_scope)}
{:tuple, elements} ->
{:tuple, Enum.map(elements, &substitute_vars_pass1(&1, substitutions, bound_in_scope))}
{:cons, h, t} ->
{:cons, substitute_vars_pass1(h, substitutions, bound_in_scope),
substitute_vars_pass1(t, substitutions, bound_in_scope)}
{:list_of, e} ->
{:list_of, substitute_vars_pass1(e, substitutions, bound_in_scope)}
{:union, members} ->
{:union, Enum.map(members, &substitute_vars_pass1(&1, substitutions, bound_in_scope))}
{:intersect, members} ->
{:intersect, Enum.map(members, &substitute_vars_pass1(&1, substitutions, bound_in_scope))}
{:type_apply, con, args} ->
new_con = substitute_vars_pass1(con, substitutions, bound_in_scope)
new_args = Enum.map(args, &substitute_vars_pass1(&1, substitutions, bound_in_scope))
{:type_apply, new_con, new_args}
_atomic_or_simple_spec ->
spec
end
end
defp normalize_negation_pass1(sub_spec, env, counter) do
{normalized_sub, next_counter} = normalize_pass1(sub_spec, env, counter)
res_spec =
case normalized_sub do
{:negation, inner_spec} -> inner_spec
:any -> :none
:none -> :any
_ -> {:negation, normalized_sub}
end
{res_spec, next_counter}
end
defp normalize_union_pass1(members, env, initial_counter) do
{list_of_normalized_member_lists, final_counter_after_all_members} =
Enum.map_reduce(members, initial_counter, fn member_spec, current_processing_counter ->
{normalized_member_spec_term, counter_after_this_member_normalized} =
normalize_pass1(member_spec, env, current_processing_counter)
members_to_add_to_overall_list =
case normalized_member_spec_term do
{:union, sub_members} -> sub_members
_ -> [normalized_member_spec_term]
end
{members_to_add_to_overall_list, counter_after_this_member_normalized}
end)
normalized_and_flattened = List.flatten(list_of_normalized_member_lists)
unique_members =
normalized_and_flattened
|> Enum.reject(&(&1 == :none))
|> Enum.uniq()
if Enum.member?(unique_members, :any) do
{:any, final_counter_after_all_members}
else
sorted_for_pass1 = Enum.sort(unique_members)
resulting_spec =
case sorted_for_pass1 do
[] -> :none
[single_member] -> single_member
list_members -> {:union, list_members}
end
{resulting_spec, final_counter_after_all_members}
end
end
defp normalize_intersection_pass1(members, env, initial_counter) do
{list_of_member_groups, final_counter_after_all_members} =
Enum.map_reduce(members, initial_counter, fn member_spec, current_processing_counter ->
{normalized_member_spec_term, counter_after_this_member_normalized} =
normalize_pass1(member_spec, env, current_processing_counter)
expanded_members =
case normalized_member_spec_term do
{:intersect, sub_members} -> sub_members
_ -> get_supertypes_pass1(normalized_member_spec_term)
end
{expanded_members, counter_after_this_member_normalized}
end)
normalized_and_flattened_with_supertypes = List.flatten(list_of_member_groups)
unique_members =
normalized_and_flattened_with_supertypes
|> Enum.reject(&(&1 == :any))
|> Enum.uniq()
if Enum.member?(unique_members, :none) do
{:none, final_counter_after_all_members}
else
sorted_for_pass1 = Enum.sort(unique_members)
resulting_spec =
case sorted_for_pass1 do
[] -> :any
[single_member] -> single_member
list_members -> {:intersect, list_members}
end
{resulting_spec, final_counter_after_all_members}
end
end
defp get_supertypes_pass1(spec) do
supertypes =
case spec do
{:literal, val} when is_atom(val) -> [:atom]
{:literal, val} when is_integer(val) -> [:integer]
{:literal, val} when is_list(val) -> [:list]
{:literal, val} when is_tuple(val) -> [:tuple]
{:mu, _v, _body} -> []
{:tuple, _} -> [:tuple]
{:integer_range, _, _} -> [:integer]
_ -> []
end
MapSet.to_list(MapSet.new([spec | supertypes]))
end
defp canonical_rename_pass(spec, env, mu_c, lambda_c) do
case spec do
{:mu, old_var_name, body} ->
new_canonical_name = fresh_var_name(:m_var, mu_c)
body_env = Map.put(env, old_var_name, {:type_var, new_canonical_name})
{renamed_body, next_mu_c, next_lambda_c} =
canonical_rename_pass(body, body_env, mu_c + 1, lambda_c)
{{:mu, new_canonical_name, renamed_body}, next_mu_c, next_lambda_c}
{:type_lambda, old_param_names, body} ->
{reversed_new_param_names, next_lambda_c_after_params, body_env} =
Enum.reduce(old_param_names, {[], lambda_c, env}, fn old_name,
{acc_new_names, current_lc,
current_env} ->
fresh_canonical_name = fresh_var_name(:lambda_var, current_lc)
{[fresh_canonical_name | acc_new_names], current_lc + 1,
Map.put(current_env, old_name, {:type_var, fresh_canonical_name})}
end)
new_canonical_param_names = Enum.reverse(reversed_new_param_names)
{renamed_body, final_mu_c, final_lambda_c} =
canonical_rename_pass(body, body_env, mu_c, next_lambda_c_after_params)
{{:type_lambda, new_canonical_param_names, renamed_body}, final_mu_c, final_lambda_c}
{:type_var, name} ->
{Map.get(env, name, spec), mu_c, lambda_c}
{:negation, sub_spec} ->
{renamed_sub, nmc, nlc} = canonical_rename_pass(sub_spec, env, mu_c, lambda_c)
{{:negation, renamed_sub}, nmc, nlc}
{:tuple, elements} ->
{renamed_elements, next_mu_c, next_lambda_c} =
map_foldl_counters_for_rename(elements, env, mu_c, lambda_c, &canonical_rename_pass/4)
{{:tuple, renamed_elements}, next_mu_c, next_lambda_c}
{:cons, head, tail} ->
{renamed_head, mu_c_after_head, lambda_c_after_head} =
canonical_rename_pass(head, env, mu_c, lambda_c)
{renamed_tail, mu_c_after_tail, lambda_c_after_tail} =
canonical_rename_pass(tail, env, mu_c_after_head, lambda_c_after_head)
{{:cons, renamed_head, renamed_tail}, mu_c_after_tail, lambda_c_after_tail}
{:union, members} ->
sorted_members = Enum.sort(members)
{renamed_members, next_mu_c, next_lambda_c} =
map_foldl_counters_for_rename(
sorted_members,
env,
mu_c,
lambda_c,
&canonical_rename_pass/4
)
{{:union, Enum.sort(renamed_members)}, next_mu_c, next_lambda_c}
{:intersect, members} ->
sorted_members = Enum.sort(members)
{renamed_members, next_mu_c, next_lambda_c} =
map_foldl_counters_for_rename(
sorted_members,
env,
mu_c,
lambda_c,
&canonical_rename_pass/4
)
{{:intersect, Enum.sort(renamed_members)}, next_mu_c, next_lambda_c}
{:type_apply, constructor_spec, arg_specs} ->
{renamed_constructor, mu_c_after_con, lambda_c_after_con} =
canonical_rename_pass(constructor_spec, env, mu_c, lambda_c)
{renamed_args, mu_c_after_args, lambda_c_after_args} =
map_foldl_counters_for_rename(
arg_specs,
env,
mu_c_after_con,
lambda_c_after_con,
&canonical_rename_pass/4
)
{{:type_apply, renamed_constructor, renamed_args}, mu_c_after_args, lambda_c_after_args}
s when is_atom(s) ->
{s, mu_c, lambda_c}
{:literal, _} = spec ->
{spec, mu_c, lambda_c}
{:integer_range, _, _} = spec ->
{spec, mu_c, lambda_c}
{:list_of, _} = spec ->
raise "TypeSpec.canonical_rename_pass: Unexpected :list_of, should be :mu. Spec: #{inspect(spec)}"
_other ->
raise "TypeSpec.canonical_rename_pass: Unhandled spec form: #{inspect(spec)}"
end
end
defp map_foldl_counters_for_rename(list, env, initial_mu_c, initial_lambda_c, fun) do
{reversed_results, final_mu_c, final_lambda_c} =
Enum.reduce(list, {[], initial_mu_c, initial_lambda_c}, fn item, {acc_items, mc, lc} ->
{processed_item, next_mc, next_lc} = fun.(item, env, mc, lc)
{[processed_item | acc_items], next_mc, next_lc}
end)
{Enum.reverse(reversed_results), final_mu_c, final_lambda_c}
end
defp fresh_var_name(prefix_atom, counter) do
:"#{Atom.to_string(prefix_atom)}#{counter}"
end
# Public API
@spec is_subtype?(t(), t()) :: boolean
def is_subtype?(spec1, spec2), do: is_subtype?(spec1, spec2, false)
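# Illustrative results (informal examples, not doctests):
#
#   is_subtype?({:literal, :ok}, :atom)                            #=> true
#   is_subtype?({:integer_range, 1, 5}, {:integer_range, 0, 10})   #=> true
#   is_subtype?(:integer, {:literal, 1})                           #=> false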
# Internal helper with already_normalized flag
@spec is_subtype?(t(), t(), boolean) :: boolean
def is_subtype?(spec1, spec2, already_normalized) do
cond do
spec1 == spec2 ->
true
spec1 == :none ->
true
spec2 == :any ->
true
spec1 == :any and spec2 != :any ->
false
spec2 == :none and spec1 != :none ->
false
true ->
{norm_s1, norm_s2} =
if already_normalized do
{spec1, spec2}
else
{normalize(spec1), normalize(spec2)}
end
if norm_s1 == norm_s2 do
true
else
do_is_subtype_structural?(norm_s1, norm_s2, MapSet.new())
end
end
end
defp do_is_subtype_structural?(spec1, spec2, visited) do
if MapSet.member?(visited, {spec1, spec2}) do
true
else
cond do
spec1 == :none ->
true
spec2 == :any ->
true
spec1 == :any and spec2 != :any ->
false
spec2 == :none and spec1 != :none ->
false
spec1 == spec2 ->
true
true ->
new_visited = MapSet.put(visited, {spec1, spec2})
case {spec1, spec2} do
{{:union, members1}, _} ->
Enum.all?(members1, &do_is_subtype_structural?(&1, spec2, new_visited))
{_, {:union, members2}} ->
Enum.any?(members2, &do_is_subtype_structural?(spec1, &1, new_visited))
{{:intersect, members1}, _} ->
Enum.any?(members1, &do_is_subtype_structural?(&1, spec2, new_visited))
{_, {:intersect, members2}} ->
Enum.all?(members2, &do_is_subtype_structural?(spec1, &1, new_visited))
{s1, s2}
when is_atom(s1) and is_atom(s2) and s1 not in [:any, :none] and
s2 not in [:any, :none] ->
s1 == s2
{{:literal, v1}, {:literal, v2}} ->
v1 == v2
{{:literal, val}, :atom} when is_atom(val) ->
true
{{:literal, val}, :integer} when is_integer(val) ->
true
{{:literal, val}, :list} when is_list(val) ->
true
{{:literal, val}, :tuple} when is_tuple(val) ->
true
{{:tuple, elems1}, {:tuple, elems2}} when length(elems1) == length(elems2) ->
Enum.zip_with(elems1, elems2, &do_is_subtype_structural?(&1, &2, new_visited))
|> Enum.all?()
{{:tuple, _}, :tuple} ->
true
{{:integer_range, _, _}, :integer} ->
true
{{:integer_range, min1, max1}, {:integer_range, min2, max2}} ->
min1_gte_min2 =
case {min1, min2} do
{:neg_inf, _} -> min2 == :neg_inf
{_, :neg_inf} -> true
{m1_v, m2_v} when is_integer(m1_v) and is_integer(m2_v) -> m1_v >= m2_v
_ -> false
end
max1_lte_max2 =
case {max1, max2} do
{:pos_inf, _} -> max2 == :pos_inf
{_, :pos_inf} -> true
{m1_v, m2_v} when is_integer(m1_v) and is_integer(m2_v) -> m1_v <= m2_v
_ -> false
end
min1_gte_min2 and max1_lte_max2
{{:literal, val}, {:integer_range, min, max}} when is_integer(val) ->
(min == :neg_inf or val >= min) and (max == :pos_inf or val <= max)
{{:mu, v1, b1_body}, {:mu, v2, b2_body}} ->
cond do
is_list_mu_form(b1_body, v1) and is_list_mu_form(b2_body, v2) ->
e1 = extract_list_mu_element(b1_body, v1)
e2 = extract_list_mu_element(b2_body, v2)
do_is_subtype_structural?(e1, e2, new_visited)
# For structural subtyping (before TDD-based reasoning), mu-types are only
# considered related when their canonical forms coincide; deeper equivalence
# checks belong in a TDD-based subtype test.
v1 == v2 ->
# They are alpha-equivalent
true
true ->
# Different canonical variables means different types at this stage.
false
end
# A non-recursive spec vs. a recursive type: unfold the mu once and re-check.
# This is how a concrete instance is checked against a recursive type.
{_non_mu_spec, {:mu, v2, b2_body} = mu_spec2} ->
# Unfold the mu-type once and re-check.
unfolded_b2 = substitute_vars_canonical(b2_body, %{v2 => mu_spec2})
do_is_subtype_structural?(spec1, unfolded_b2, new_visited)
# A recursive type vs. a non-recursive spec (generally false).
{{:mu, _, _}, _non_mu_spec} ->
# A recursive type (potentially infinite set) cannot be a subtype
# of a non-recursive, finite type (unless it's :any, which is already handled).
false
{{:negation, n_body1}, {:negation, n_body2}} ->
do_is_subtype_structural?(n_body2, n_body1, new_visited)
_ ->
false
end
end
end
end
defp substitute_vars_canonical(spec, substitutions) do
case spec do
{:type_var, name} ->
Map.get(substitutions, name, spec)
{:mu, var_name, body} ->
active_substitutions = Map.delete(substitutions, var_name)
{:mu, var_name, substitute_vars_canonical(body, active_substitutions)}
{:type_lambda, param_names, body} ->
active_substitutions = Enum.reduce(param_names, substitutions, &Map.delete(&2, &1))
{:type_lambda, param_names, substitute_vars_canonical(body, active_substitutions)}
{:negation, sub} ->
{:negation, substitute_vars_canonical(sub, substitutions)}
{:tuple, elements} ->
{:tuple, Enum.map(elements, &substitute_vars_canonical(&1, substitutions))}
{:cons, h, t} ->
{:cons, substitute_vars_canonical(h, substitutions),
substitute_vars_canonical(t, substitutions)}
{:list_of, e} ->
{:list_of, substitute_vars_canonical(e, substitutions)}
{:union, members} ->
{:union, Enum.map(members, &substitute_vars_canonical(&1, substitutions))}
{:intersect, members} ->
{:intersect, Enum.map(members, &substitute_vars_canonical(&1, substitutions))}
{:type_apply, con, args} ->
new_con = substitute_vars_canonical(con, substitutions)
new_args = Enum.map(args, &substitute_vars_canonical(&1, substitutions))
{:type_apply, new_con, new_args}
_atomic_or_simple_spec ->
spec
end
end
defp is_list_mu_form({:union, members}, rec_var_name) do
sorted_members = Enum.sort(members)
match?([{:literal, []}, {:cons, _elem, {:type_var, ^rec_var_name}}], sorted_members) or
match?([{:cons, _elem, {:type_var, ^rec_var_name}}, {:literal, []}], sorted_members)
end
defp is_list_mu_form(_, _), do: false
defp extract_list_mu_element({:union, members}, rec_var_name) do
Enum.find_value(members, fn
{:cons, elem_spec, {:type_var, ^rec_var_name}} -> elem_spec
_ -> nil
end) || :any
end
# Public API for get_supertypes
def get_supertypes(spec), do: get_supertypes(spec, false)
# Internal helper for get_supertypes
defp get_supertypes(spec_input, already_normalized) do
fully_normalized_spec = if already_normalized, do: spec_input, else: normalize(spec_input)
supertypes =
case fully_normalized_spec do
{:literal, val} when is_atom(val) -> [:atom]
{:literal, val} when is_integer(val) -> [:integer]
{:literal, val} when is_list(val) -> [:list]
{:literal, val} when is_tuple(val) -> [:tuple]
{:mu, v, body} -> if is_list_mu_form(body, v), do: [:list], else: []
{:tuple, _} -> [:tuple]
{:integer_range, _, _} -> [:integer]
_ -> []
end
MapSet.to_list(MapSet.new([fully_normalized_spec | supertypes]))
end
end
defmodule Tdd.Store do
@moduledoc """
Manages the state of the TDD system's node graph and operation cache.
This module acts as the stateful backend for the TDD algorithms. It is
responsible for creating unique, shared nodes (ensuring structural canonicity)
and for memoizing the results of expensive operations.
It is intentionally agnostic about the *meaning* of the variables within the
nodes; it treats them as opaque, comparable terms. The logic for interpreting
these variables resides in higher-level modules like `Tdd.Algo` and
`Tdd.Consistency.Engine`.
For simplicity, this implementation uses the Process dictionary for state.
In a production, concurrent application, this would be replaced by a `GenServer`
to ensure safe, serialized access to the shared TDD state.
"""
# --- State Keys ---
@nodes_key :tdd_nodes
@node_by_id_key :tdd_node_by_id
@next_id_key :tdd_next_id
@op_cache_key :tdd_op_cache
# --- Terminal Node IDs ---
@false_node_id 0
@true_node_id 1
# --- Public API ---
@doc "Initializes the TDD store in the current process."
def init do
# The main lookup table: {variable, y, n, d} -> id
Process.put(@nodes_key, %{})
# The reverse lookup table: id -> {variable, y, n, d} or :terminal
Process.put(@node_by_id_key, %{
@false_node_id => :false_terminal,
@true_node_id => :true_terminal
})
# The next available integer ID for a new node.
Process.put(@next_id_key, 2)
# The cache for memoizing operation results: {op, args} -> id
Process.put(@op_cache_key, %{})
:ok
end
@doc "Returns the ID for the TRUE terminal node (the 'any' type)."
@spec true_node_id() :: non_neg_integer()
def true_node_id, do: @true_node_id
@doc "Returns the ID for the FALSE terminal node (the 'none' type)."
@spec false_node_id() :: non_neg_integer()
def false_node_id, do: @false_node_id
@doc "Retrieves the details of a node by its ID."
@spec get_node(non_neg_integer()) ::
{:ok,
{variable :: term(), yes_id :: non_neg_integer(), no_id :: non_neg_integer(),
dc_id :: non_neg_integer()}}
| {:ok, :true_terminal | :false_terminal}
| {:error, :not_found}
def get_node(id) do
case Process.get(@node_by_id_key, %{}) do
%{^id => details} -> {:ok, details}
%{} -> {:error, :not_found}
end
end
@doc """
Finds an existing node that matches the structure or creates a new one.
This is the core function for ensuring structural sharing (part of the "Reduced"
property of ROBDDs). It also implements a fundamental reduction rule: if all
children of a node are identical, the node is redundant and is replaced by
its child.
"""
@spec find_or_create_node(
variable :: term(),
yes_id :: non_neg_integer(),
no_id :: non_neg_integer(),
dc_id :: non_neg_integer()
) :: non_neg_integer()
def find_or_create_node(variable, yes_id, no_id, dc_id) do
# Basic reduction rule: a node whose test is irrelevant is redundant.
if yes_id == no_id && yes_id == dc_id do
yes_id
else
node_tuple = {variable, yes_id, no_id, dc_id}
nodes = Process.get(@nodes_key, %{})
case Map.get(nodes, node_tuple) do
# Node already exists, return its ID for structural sharing.
id when is_integer(id) ->
id
# Node does not exist, create it.
nil ->
next_id = Process.get(@next_id_key)
node_by_id = Process.get(@node_by_id_key)
# Update all state tables
Process.put(@nodes_key, Map.put(nodes, node_tuple, next_id))
Process.put(@node_by_id_key, Map.put(node_by_id, next_id, node_tuple))
Process.put(@next_id_key, next_id + 1)
next_id
end
end
end
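# Illustrative usage (after `init/0`): structurally identical requests share a
# single id, and a node whose three children coincide collapses to that child.
#
#   a = Tdd.Store.find_or_create_node(Tdd.Variable.v_is_atom(), 1, 0, 0)
#   b = Tdd.Store.find_or_create_node(Tdd.Variable.v_is_atom(), 1, 0, 0)
#   a == b                                                            #=> true
#   Tdd.Store.find_or_create_node(Tdd.Variable.v_is_atom(), 1, 1, 1)  #=> 1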
@doc "Retrieves a result from the operation cache."
@spec get_op_cache(term()) :: {:ok, term()} | :not_found
def get_op_cache(cache_key) do
case Process.get(@op_cache_key, %{}) do
%{^cache_key => result} -> {:ok, result}
%{} -> :not_found
end
end
@doc "Puts a result into the operation cache."
@spec put_op_cache(term(), term()) :: :ok
def put_op_cache(cache_key, result) do
# A simple read-modify-write on the process dictionary; adequate for this
# single-process store.
cache = Process.get(@op_cache_key, %{})
Process.put(@op_cache_key, Map.put(cache, cache_key, result))
:ok
end
@doc """
Creates a unique, temporary placeholder node for a recursive spec.
Returns the ID of this placeholder.
"""
@spec create_placeholder(Tdd.TypeSpec.t()) :: non_neg_integer()
def create_placeholder(spec) do
# The variable is a unique tuple that won't conflict with real predicates.
# The child ids are temporary (they point at the terminals) and will be
# replaced when the recursive knot is tied via `update_node_in_place/2`.
find_or_create_node({:placeholder, spec}, 1, 0, 0)
end
@doc """
Updates a node's details directly. This is a special-purpose, mutable-style
operation used exclusively by the compiler to "tie the knot" for recursive types.
It updates both the forward and reverse lookup tables.
"""
@spec update_node_in_place(
non_neg_integer(),
new_details ::
{:ok,
{term(), non_neg_integer(), non_neg_integer(), non_neg_integer()}
| :true_terminal
| :false_terminal}
) :: :ok
def update_node_in_place(id, {:ok, new_details}) do
# Get current state
nodes = Process.get(@nodes_key)
node_by_id = Process.get(@node_by_id_key)
# 1. Find and remove the old reverse mapping from the `nodes` table.
# The node at `id` is a placeholder, so its details are what we created above.
old_details = Map.get(node_by_id, id)
nodes = Map.delete(nodes, old_details)
# 2. Add the new reverse mapping to the `nodes` table.
# But only if the new details correspond to a non-terminal node.
nodes =
case new_details do
{_v, _y, _n, _d} -> Map.put(nodes, new_details, id)
_ -> nodes
end
# 3. Update the main `node_by_id` table.
node_by_id = Map.put(node_by_id, id, new_details)
# 4. Save the updated tables.
Process.put(@nodes_key, nodes)
Process.put(@node_by_id_key, node_by_id)
:ok
end
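# Illustrative "tie the knot" flow (hypothetical compiler pseudocode; the
# `compile_body/2` helper is assumed here, not part of this module):
#
#   placeholder_id = Tdd.Store.create_placeholder(rec_spec)
#   body_id        = compile_body(rec_spec, placeholder_id)
#   {:ok, details} = Tdd.Store.get_node(body_id)
#   :ok = Tdd.Store.update_node_in_place(placeholder_id, {:ok, details})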
end
defmodule Tdd.Variable do
# use Tdd.Debug
@moduledoc """
Defines the canonical structure for all Tdd predicate variables.
The structure `{category, predicate, value, padding}` is used to enforce a
stable global ordering. All variables are 4-element tuples to ensure that
Elixir's tuple-size-first comparison rule does not interfere with the
intended predicate ordering within a category.
"""
# --- Category 0: Primary Type Discriminators ---
# Padding with `nil` to make them 4-element tuples.
@spec v_is_atom() :: term()
def v_is_atom, do: {0, :is_atom, nil, nil}
@spec v_is_integer() :: term()
def v_is_integer, do: {0, :is_integer, nil, nil}
@spec v_is_list() :: term()
def v_is_list, do: {0, :is_list, nil, nil}
@spec v_is_tuple() :: term()
def v_is_tuple, do: {0, :is_tuple, nil, nil}
# --- Category 1: Atom Properties ---
@spec v_atom_eq(atom()) :: term()
def v_atom_eq(atom_val) when is_atom(atom_val), do: {1, :value, atom_val, nil}
# --- Category 2: Integer Properties ---
@spec v_int_lt(integer()) :: term()
def v_int_lt(n) when is_integer(n), do: {2, :alt, n, nil}
@spec v_int_eq(integer()) :: term()
def v_int_eq(n) when is_integer(n), do: {2, :beq, n, nil}
@spec v_int_gt(integer()) :: term()
def v_int_gt(n) when is_integer(n), do: {2, :cgt, n, nil}
# --- Category 4: Tuple Properties ---
# The most complex var here is `:b_element` with index and nested var.
# So all vars in this category need to be at least 4-element.
@spec v_tuple_size_eq(non_neg_integer()) :: term()
def v_tuple_size_eq(size) when is_integer(size) and size >= 0, do: {4, :a_size, size, nil}
@spec v_tuple_elem_pred(non_neg_integer(), term()) :: term()
def v_tuple_elem_pred(index, nested_pred_var) when is_integer(index) and index >= 0 do
{4, :b_element, index, nested_pred_var}
end
# --- Category 5: List Properties ---
@doc "Predicate: The list is the empty list `[]`."
@spec v_list_is_empty() :: term()
def v_list_is_empty, do: {5, :b_is_empty, nil, nil}
@doc "Predicate: Applies a nested predicate to the head of a non-empty list."
@spec v_list_head_pred(term()) :: term()
def v_list_head_pred(nested_pred_var), do: {5, :c_head, nested_pred_var, nil}
@doc "Predicate: Applies a nested predicate to the tail of a non-empty list."
@spec v_list_tail_pred(term()) :: term()
def v_list_tail_pred(nested_pred_var), do: {5, :d_tail, nested_pred_var, nil}
end
defmodule Tdd.Predicate.Info do
@moduledoc "A knowledge base for the properties of TDD predicate variables."
alias Tdd.Variable
@doc "Returns a map of traits for a given predicate variable."
@spec get_traits(term()) :: map() | nil
def get_traits({0, :is_atom, _, _}) do
%{
type: :primary,
category: :atom,
implies: [
{Variable.v_is_integer(), false},
{Variable.v_is_list(), false},
{Variable.v_is_tuple(), false}
]
}
end
def get_traits({0, :is_integer, _, _}) do
%{
type: :primary,
category: :integer,
implies: [
{Variable.v_is_atom(), false},
{Variable.v_is_list(), false},
{Variable.v_is_tuple(), false}
]
}
end
def get_traits({0, :is_list, _, _}) do
%{
type: :primary,
category: :list,
implies: [
{Variable.v_is_atom(), false},
{Variable.v_is_integer(), false},
{Variable.v_is_tuple(), false}
]
}
end
def get_traits({0, :is_tuple, _, _}) do
%{
type: :primary,
category: :tuple,
implies: [
{Variable.v_is_atom(), false},
{Variable.v_is_integer(), false},
{Variable.v_is_list(), false}
]
}
end
def get_traits({1, :value, _val, _}) do
%{type: :atom_value, category: :atom, implies: [{Variable.v_is_atom(), true}]}
end
def get_traits({2, :alt, _, _}),
do: %{type: :integer_prop, category: :integer, implies: [{Variable.v_is_integer(), true}]}
def get_traits({2, :beq, _, _}),
do: %{type: :integer_prop, category: :integer, implies: [{Variable.v_is_integer(), true}]}
def get_traits({2, :cgt, _, _}),
do: %{type: :integer_prop, category: :integer, implies: [{Variable.v_is_integer(), true}]}
def get_traits({4, :a_size, _, _}) do
%{type: :tuple_prop, category: :tuple, implies: [{Variable.v_is_tuple(), true}]}
end
def get_traits({4, :b_element, index, _nested_var}) do
%{
type: :tuple_recursive,
category: :tuple,
sub_key: {:elem, index},
implies: [{Variable.v_is_tuple(), true}]
}
end
# def get_traits({5, :a_all_elements, element_spec, _}) do
# %{
# type: :list_recursive_ambient,
# category: :list,
# ambient_constraint_spec: element_spec,
# implies: [{Variable.v_is_list(), true}]
# }
# end
def get_traits({5, :b_is_empty, _, _}) do
%{type: :list_prop, category: :list, implies: [{Variable.v_is_list(), true}]}
end
def get_traits({5, :c_head, _nested_var, _}) do
%{
type: :list_recursive,
category: :list,
sub_key: :head,
implies: [{Variable.v_is_list(), true}, {Variable.v_list_is_empty(), false}]
}
end
def get_traits({5, :d_tail, _nested_var, _}) do
%{
type: :list_recursive,
category: :list,
sub_key: :tail,
implies: [{Variable.v_is_list(), true}, {Variable.v_list_is_empty(), false}]
}
end
def get_traits(_), do: nil
end
defmodule Tdd.Consistency.Engine do
# use Tdd.Debug
@moduledoc """
A rule-based engine for checking the semantic consistency of a set of assumptions.
This engine is the "oracle" for the `Tdd.Algo.simplify/2` function. It takes
a set of assumptions about predicate variables (e.g., `{is_atom, true}`,
`{value == :foo, true}`) and determines if that set is logically consistent.
The check is performed recursively. At each level, it:
1. **Expands** assumptions with their local implications (e.g. `value == :foo` implies `is_atom`).
2. **Checks for flat contradictions** at the current level (e.g., `is_atom` and `is_integer`).
3. **Groups** assumptions by sub-problem (e.g., all `head(...)` predicates).
4. **Recursively calls** the checker on each sub-problem with unwrapped predicates.
"""
alias Tdd.Predicate.Info
alias Tdd.Variable
@doc """
Checks if a map of assumptions is logically consistent.
Returns `:consistent` or `:contradiction`.
"""
@spec check(map()) :: :consistent | :contradiction
def check(assumptions) do
# The public entry point delegates to the recursive helper `do_check/1`.
do_check(assumptions)
end
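# Illustrative (informal example, not a doctest): conflicting primary-type
# assumptions are caught via implication expansion.
#
#   Tdd.Consistency.Engine.check(%{
#     Tdd.Variable.v_is_atom() => true,
#     Tdd.Variable.v_is_integer() => true
#   })
#   #=> :contradiction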
@doc """
Expands a map of assumptions with all their logical implications.
Returns `{:ok, expanded_map}` or `{:error, :contradiction}`.
"""
@spec expand(map()) :: {:ok, map()} | {:error, :contradiction}
def expand(assumptions) do
# Just expose the internal helper.
expand_with_implications(assumptions)
end
# --- The Core Recursive Checker ---
defp do_check(assumptions) do
# 1. Expand assumptions with immediate, local implications.
with {:ok, expanded} <- expand_with_implications(assumptions),
# 2. Check for contradictions among the flat predicates at this level.
:ok <- check_flat_consistency(expanded) do
# 3. Group the expanded assumptions into sub-problems based on their scope.
sub_problems =
expanded
|> Enum.group_by(fn {var, _val} -> (Info.get_traits(var) || %{})[:sub_key] end)
# Drop top-level keys, we only want recursive sub-problems.
|> Map.drop([nil])
# 4. If there are no sub-problems, and we passed the flat check, we're consistent.
if map_size(sub_problems) == 0 do
:consistent
else
# 5. Recursively check each sub-problem. Stop at the first contradiction.
Enum.find_value(sub_problems, :consistent, fn {_sub_key, sub_assumptions_list} ->
# Unwrap the variables for the recursive call
# e.g., {{5, :c_head, NESTED_VAR, _}, val} -> {NESTED_VAR, val}
remapped_assumptions = remap_sub_problem_vars(sub_assumptions_list)
# If any sub-problem is a contradiction, the whole set is.
case do_check(remapped_assumptions) do
# Not a contradiction, continue checking others.
:consistent -> nil
# Contradiction found, halt and return.
:contradiction -> :contradiction
end
end)
end
else
# `expand_with_implications` or `check_flat_consistency` failed.
{:error, _reason} -> :contradiction
end
end
# --- Recursive Checking Helpers ---
@doc "Converts a list of scoped assumptions into a map of base assumptions for a sub-problem."
defp remap_sub_problem_vars(assumptions_list) do
Map.new(assumptions_list, fn {var, val} ->
{unwrap_var(var), val}
end)
end
@doc "Extracts the inner predicate variable from a recursive variable."
defp unwrap_var(var) do
case var do
{4, :b_element, _index, nested_pred_var} -> nested_pred_var
{5, :c_head, nested_pred_var, _} -> nested_pred_var
{5, :d_tail, nested_pred_var, _} -> nested_pred_var
# This function is only called on vars that are known to be recursive,
# so other cases are not expected.
other -> other
end
end
# --- Step 1: Implication Expansion ---
defp expand_with_implications(assumptions) do
expand_loop(assumptions, assumptions)
end
defp expand_loop(new_assumptions, all_assumptions) do
implications =
Enum.flat_map(new_assumptions, fn
{var, true} -> Map.get(Info.get_traits(var) || %{}, :implies, [])
_ -> []
end)
case Enum.reduce(implications, {:ok, %{}}, fn {implied_var, implied_val}, acc ->
reduce_implication({implied_var, implied_val}, all_assumptions, acc)
end) do
{:error, :contradiction} = err ->
err
{:ok, newly_added} when map_size(newly_added) == 0 ->
{:ok, all_assumptions}
{:ok, newly_added} ->
expand_loop(newly_added, Map.merge(all_assumptions, newly_added))
end
end
defp reduce_implication({var, val}, all_assumptions, {:ok, new_acc}) do
case Map.get(all_assumptions, var) do
nil -> {:ok, Map.put(new_acc, var, val)}
^val -> {:ok, new_acc}
_other_val -> {:error, :contradiction}
end
end
defp reduce_implication(_implication, _all_assumptions, error_acc), do: error_acc
# --- Step 2: Flat Consistency Checks ---
defp check_flat_consistency(assumptions) do
with :ok <- check_primary_type_exclusivity(assumptions),
:ok <- check_atom_consistency(assumptions),
:ok <- check_list_consistency(assumptions),
:ok <- check_integer_consistency(assumptions),
:ok <- check_tuple_consistency(assumptions) do
:ok
else
:error -> {:error, :consistency_error}
end
end
defp check_primary_type_exclusivity(assumptions) do
primary_types = [
Variable.v_is_atom(),
Variable.v_is_integer(),
Variable.v_is_list(),
Variable.v_is_tuple()
]
true_primary_types = Enum.count(primary_types, &(Map.get(assumptions, &1) == true))
if true_primary_types > 1, do: :error, else: :ok
end
defp check_atom_consistency(assumptions) do
true_atom_values =
Enum.reduce(assumptions, MapSet.new(), fn
{{1, :value, atom_val, _}, true}, acc -> MapSet.put(acc, atom_val)
_, acc -> acc
end)
if MapSet.size(true_atom_values) > 1, do: :error, else: :ok
end
defp check_tuple_consistency(assumptions) do
true_tuple_sizes =
Enum.reduce(assumptions, MapSet.new(), fn
{{4, :a_size, size, _}, true}, acc -> MapSet.put(acc, size)
_, acc -> acc
end)
if MapSet.size(true_tuple_sizes) > 1, do: :error, else: :ok
end
defp check_list_consistency(assumptions) do
is_empty = Map.get(assumptions, Variable.v_list_is_empty()) == true
has_head_prop = Enum.any?(assumptions, &match?({{5, :c_head, _, _}, true}, &1))
has_tail_prop = Enum.any?(assumptions, &match?({{5, :d_tail, _, _}, true}, &1))
if is_empty and (has_head_prop or has_tail_prop), do: :error, else: :ok
end
defp check_integer_consistency(assumptions) do
initial_range = {:neg_inf, :pos_inf}
result =
Enum.reduce_while(assumptions, initial_range, fn assumption, {min, max} ->
case assumption do
{{2, :alt, n, _}, true} -> narrow_range(min, safe_min(max, n - 1))
{{2, :alt, n, _}, false} -> narrow_range(safe_max(min, n), max)
{{2, :beq, n, _}, true} -> narrow_range(safe_max(min, n), safe_min(max, n))
{{2, :beq, n, _}, false} when min == n and max == n -> {:halt, :invalid}
{{2, :cgt, n, _}, true} -> narrow_range(safe_max(min, n + 1), max)
{{2, :cgt, n, _}, false} -> narrow_range(min, safe_min(max, n))
_ -> {:cont, {min, max}}
end
end)
case result do
:invalid -> :error
_ -> :ok
end
end
defp narrow_range(min, max) do
is_invalid =
case {min, max} do
{:neg_inf, _} -> false
{_, :pos_inf} -> false
{m, n} when is_integer(m) and is_integer(n) -> m > n
_ -> false
end
if is_invalid, do: {:halt, :invalid}, else: {:cont, {min, max}}
end
defp safe_max(:neg_inf, x), do: x
defp safe_max(x, :neg_inf), do: x
defp safe_max(:pos_inf, _), do: :pos_inf
defp safe_max(_, :pos_inf), do: :pos_inf
defp safe_max(a, b), do: :erlang.max(a, b)
defp safe_min(:pos_inf, x), do: x
defp safe_min(x, :pos_inf), do: x
defp safe_min(:neg_inf, _), do: :neg_inf
defp safe_min(_, :neg_inf), do: :neg_inf
defp safe_min(a, b), do: :erlang.min(a, b)
end
defmodule Tdd.Algo do
@moduledoc "Implements the core, stateless algorithms for TDD manipulation."
use Tdd.Debug
alias Tdd.Store
alias Tdd.Consistency.Engine
alias Tdd.Debug
# --- Binary Operation: Apply ---
@spec apply(atom, (atom, atom -> atom), non_neg_integer, non_neg_integer) :: non_neg_integer
def apply(op_name, op_lambda, u1_id, u2_id) do
# Memoization wrapper
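# Sorting the ids makes the cache key order-insensitive, which assumes
# `op_lambda` is commutative (e.g. a union or intersection).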
cache_key = {:apply, op_name, Enum.sort([u1_id, u2_id])}
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
result_id = do_apply(op_name, op_lambda, u1_id, u2_id)
Store.put_op_cache(cache_key, result_id)
result_id
end
end
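# Illustrative usage: `op_lambda` is only ever invoked with two terminal tags
# (:true_terminal / :false_terminal) and must return one of them. A union
# (logical OR) of two TDDs could be computed as:
#
#   union_op = fn a, b ->
#     if a == :true_terminal or b == :true_terminal,
#       do: :true_terminal,
#       else: :false_terminal
#   end
#
#   Tdd.Algo.apply(:union, union_op, tdd_a, tdd_b)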
defp do_apply(op_name, op_lambda, u1_id, u2_id) do
with {:ok, u1_details} <- Store.get_node(u1_id),
{:ok, u2_details} <- Store.get_node(u2_id) do
cond do
(u1_details == :true_terminal or u1_details == :false_terminal) and
(u2_details == :true_terminal or u2_details == :false_terminal) ->
if op_lambda.(u1_details, u2_details) == :true_terminal,
do: Store.true_node_id(),
else: Store.false_node_id()
u1_details == :true_terminal or u1_details == :false_terminal ->
{var2, y2, n2, d2} = u2_details
Store.find_or_create_node(
var2,
apply(op_name, op_lambda, u1_id, y2),
apply(op_name, op_lambda, u1_id, n2),
apply(op_name, op_lambda, u1_id, d2)
)
u2_details == :true_terminal or u2_details == :false_terminal ->
{var1, y1, n1, d1} = u1_details
Store.find_or_create_node(
var1,
apply(op_name, op_lambda, y1, u2_id),
apply(op_name, op_lambda, n1, u2_id),
apply(op_name, op_lambda, d1, u2_id)
)
true ->
{var1, y1, n1, d1} = u1_details
{var2, y2, n2, d2} = u2_details
top_var = Enum.min([var1, var2])
res_y =
apply(
op_name,
op_lambda,
if(var1 == top_var, do: y1, else: u1_id),
if(var2 == top_var, do: y2, else: u2_id)
)
res_n =
apply(
op_name,
op_lambda,
if(var1 == top_var, do: n1, else: u1_id),
if(var2 == top_var, do: n2, else: u2_id)
)
res_d =
apply(
op_name,
op_lambda,
if(var1 == top_var, do: d1, else: u1_id),
if(var2 == top_var, do: d2, else: u2_id)
)
Store.find_or_create_node(top_var, res_y, res_n, res_d)
end
end
end
# --- Unary Operation: Negation ---
@spec negate(non_neg_integer) :: non_neg_integer
def negate(tdd_id) do
cache_key = {:negate, tdd_id}
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
result_id =
case Store.get_node(tdd_id) do
{:ok, :true_terminal} ->
Store.false_node_id()
{:ok, :false_terminal} ->
Store.true_node_id()
{:ok, {var, y, n, d}} ->
Store.find_or_create_node(var, negate(y), negate(n), negate(d))
end
Store.put_op_cache(cache_key, result_id)
result_id
end
end
# --- Unary Operation: Semantic Simplification ---
@doc """
Simplifies a TDD under a set of external assumptions.
This is the main public entry point. It sets up the context for the
coinductive cycle detection.
"""
@spec simplify(non_neg_integer(), map()) :: non_neg_integer
def simplify(tdd_id, assumptions \\ %{}) do
# The main cache uses a sorted list of assumptions for a canonical key.
sorted_assumptions = Enum.sort(assumptions)
cache_key = {:simplify, tdd_id, sorted_assumptions}
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
# Start the recursive simplification with an empty context.
# The context is a MapSet to track visited (id, assumptions) pairs on the call stack.
result_id = do_simplify(tdd_id, sorted_assumptions, MapSet.new())
Store.put_op_cache(cache_key, result_id)
result_id
end
end
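# Illustrative usage (after `Tdd.Store.init/0`; `tdd_id` is any compiled TDD):
#
#   Tdd.Algo.simplify(tdd_id, %{Tdd.Variable.v_is_atom() => true})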
# The private helper takes a `context` to detect infinite recursion.
defp do_simplify(tdd_id, sorted_assumptions, context) do
Debug.log_entry("do_simplify(#{tdd_id}, #{inspect(sorted_assumptions)})")
current_state = {tdd_id, sorted_assumptions}
# Coinductive base case: if we have seen this exact state before in this
# call stack, we are in a loop.
if MapSet.member?(context, current_state) do
Debug.log("LOOP DETECTED", "do_simplify: Coinductive Case")
Store.true_node_id()
|> Debug.log_exit("do_simplify RETURNING from loop")
else
new_context = MapSet.put(context, current_state)
assumptions = Map.new(sorted_assumptions)
# If the assumptions themselves are contradictory, the result is `none`.
if Engine.check(assumptions) == :contradiction do
Debug.log(assumptions, "do_simplify: Assumptions are contradictory")
Debug.log_exit(Store.false_node_id(), "do_simplify RETURNING from contradiction")
else
case Store.get_node(tdd_id) do
{:ok, :true_terminal} ->
Debug.log_exit(Store.true_node_id(), "do_simplify RETURNING from true_terminal")
{:ok, :false_terminal} ->
Debug.log_exit(Store.false_node_id(), "do_simplify RETURNING from false_terminal")
{:ok, {var, y, n, d}} ->
Debug.log(var, "do_simplify: Node variable")
# Detect recursive (scoped) variables and delegate to the sub-problem handler.
case var do
# This variable means "the head must satisfy the type represented by NESTED_VAR"
{5, :c_head, nested_var, _} ->
handle_recursive_subproblem(
:simplify,
:head,
nested_var,
{var, y, n, d},
sorted_assumptions,
context
)
# This variable means "the tail must satisfy the type represented by NESTED_VAR"
{5, :d_tail, nested_var, _} ->
handle_recursive_subproblem(
:simplify,
:tail,
nested_var,
{var, y, n, d},
sorted_assumptions,
context
)
# This variable means "element `index` must satisfy NESTED_VAR"
{4, :b_element, index, nested_var} ->
handle_recursive_subproblem(
:simplify,
{:elem, index},
nested_var,
{var, y, n, d},
sorted_assumptions,
context
)
_ ->
# Check if the variable's value is already explicitly known.
case Map.get(assumptions, var) do
true ->
do_simplify(y, sorted_assumptions, new_context)
|> Debug.log_exit("do_simplify RETURNING from known var (true)")
false ->
do_simplify(n, sorted_assumptions, new_context)
|> Debug.log_exit("do_simplify RETURNING from known var (false)")
:dc ->
do_simplify(d, sorted_assumptions, new_context)
|> Debug.log_exit("do_simplify RETURNING from known var (dc)")
# The variable's value is not explicitly known.
# Check if the context of assumptions *implies* its value.
nil ->
Debug.log(nil, "do_simplify: Var not known, checking implications")
# Parenthesized so the boolean comparison result (not the :contradiction
# atom) is what gets piped into the debug log.
assumptions_imply_true =
(Engine.check(Map.put(assumptions, var, false)) == :contradiction)
|> Debug.log("do_simplify: assumptions_imply_true?")
assumptions_imply_false =
(Engine.check(Map.put(assumptions, var, true)) == :contradiction)
|> Debug.log("do_simplify: assumptions_imply_false?")
cond do
# This case should be caught by the top-level Engine.check, but is here for safety.
assumptions_imply_true and assumptions_imply_false ->
Store.false_node_id()
# If the context implies `var` must be true, we follow the 'yes' path.
# The implied value is recorded in the assumptions for the recursive call
# so deeper consistency checks see a fully explicit assumption set.
assumptions_imply_true ->
do_simplify(y, Enum.sort(Map.put(assumptions, var, true)), new_context)
|> Debug.log_exit("do_simplify RETURNING from implied var (true)")
# Likewise, if the context implies `var` must be false, follow the 'no'
# path with the implied value recorded.
assumptions_imply_false ->
do_simplify(n, Enum.sort(Map.put(assumptions, var, false)), new_context)
|> Debug.log_exit("do_simplify RETURNING from implied var (false)")
# The variable is truly independent. We must simplify all branches,
# adding the new assumption for each respective path, and reconstruct the node.
true ->
Debug.log("independent", "do_simplify: Var is independent, branching")
s_y =
do_simplify(y, Enum.sort(Map.put(assumptions, var, true)), new_context)
s_n =
do_simplify(n, Enum.sort(Map.put(assumptions, var, false)), new_context)
s_d =
do_simplify(d, Enum.sort(Map.put(assumptions, var, :dc)), new_context)
Store.find_or_create_node(var, s_y, s_n, s_d)
|> Debug.log_exit("do_simplify RETURNING from branch reconstruction")
end
end
end
end
end
end
end
@doc """
Recursively traverses a TDD graph from `root_id`, creating a new graph
where all references to `from_id` are replaced with `to_id`.
This is a pure function used to "tie the knot" in recursive type compilation.
"""
@spec substitute(
root_id :: non_neg_integer(),
from_id :: non_neg_integer(),
to_id :: non_neg_integer()
) ::
non_neg_integer()
def substitute(root_id, from_id, to_id) do
# Handle the trivial case where the root is the node to be replaced.
if root_id == from_id, do: to_id, else: do_substitute(root_id, from_id, to_id)
end
# The private helper uses memoization to avoid re-computing identical sub-graphs.
defp do_substitute(root_id, from_id, to_id) do
cache_key = {:substitute, root_id, from_id, to_id}
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
result_id =
case Store.get_node(root_id) do
# Terminal nodes are unaffected unless they are the target of substitution.
{:ok, :true_terminal} ->
Store.true_node_id()
{:ok, :false_terminal} ->
Store.false_node_id()
# For internal nodes, recursively substitute in all children.
{:ok, {var, y, n, d}} ->
new_y = substitute(y, from_id, to_id)
new_n = substitute(n, from_id, to_id)
new_d = substitute(d, from_id, to_id)
Store.find_or_create_node(var, new_y, new_n, new_d)
# This case should not be hit if the graph is well-formed.
{:error, reason} ->
raise "substitute encountered an error getting node #{root_id}: #{reason}"
end
Store.put_op_cache(cache_key, result_id)
result_id
end
end
@doc """
Checks if a TDD is coinductively empty.
This is used for subtyping checks. Returns `0` if empty, or a non-zero ID otherwise.
"""
@spec check_emptiness(non_neg_integer()) :: non_neg_integer()
def check_emptiness(tdd_id) do
cache_key = {:check_emptiness, tdd_id}
case Store.get_op_cache(cache_key) do
{:ok, id} ->
id
:not_found ->
# The main call starts with no assumptions; the recursive helper carries
# them as a sorted list so its cache keys stay canonical.
assumptions_list = []
result_id = do_check_emptiness(tdd_id, assumptions_list, MapSet.new())
Store.put_op_cache(cache_key, result_id)
result_id
end
end
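# Illustrative subtyping sketch (assumes `and_op` is an intersection lambda
# in the shape `apply/4` expects; this pairing is not spelled out here):
#
#   and_op = fn a, b ->
#     if a == :true_terminal and b == :true_terminal,
#       do: :true_terminal,
#       else: :false_terminal
#   end
#
#   difference = Tdd.Algo.apply(:intersect, and_op, t1_id, Tdd.Algo.negate(t2_id))
#   # t1 is a subtype of t2 iff the difference is empty:
#   Tdd.Algo.check_emptiness(difference) == Tdd.Store.false_node_id()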
# This is the recursive helper for the emptiness check.
# It is almost identical to do_simplify, but with the opposite coinductive rule.
defp do_check_emptiness(tdd_id, sorted_assumptions, context) do
current_state = {tdd_id, sorted_assumptions}
# Coinductive base case for EMPTINESS
if MapSet.member?(context, current_state) do
# A loop means no finite counterexample was found on this path.
# So, for the emptiness check, this path is considered empty.
Store.false_node_id()
else
new_context = MapSet.put(context, current_state)
assumptions = Map.new(sorted_assumptions)
if Engine.check(assumptions) == :contradiction do
Store.false_node_id()
else
case Store.get_node(tdd_id) do
{:ok, :true_terminal} ->
Store.true_node_id()
{:ok, :false_terminal} ->
Store.false_node_id()
{:ok, {var, y, n, d}} ->
case var do
# Handle recursive (scoped) variables, mirroring do_simplify/3.
{5, :c_head, nested_var, _} ->
handle_recursive_subproblem(
:check_emptiness,
:head,
nested_var,
{var, y, n, d},
sorted_assumptions,
context
)
{5, :d_tail, nested_var, _} ->
handle_recursive_subproblem(
:check_emptiness,
:tail,
nested_var,
{var, y, n, d},
sorted_assumptions,
context
)
{4, :b_element, index, nested_var} ->
handle_recursive_subproblem(
:check_emptiness,
{:elem, index},
nested_var,
{var, y, n, d},
sorted_assumptions,
context
)
_ ->
case Map.get(assumptions, var) do
true ->
do_check_emptiness(y, sorted_assumptions, new_context)
false ->
do_check_emptiness(n, sorted_assumptions, new_context)
:dc ->
do_check_emptiness(d, sorted_assumptions, new_context)
nil ->
assumptions_imply_true =
Engine.check(Map.put(assumptions, var, false)) == :contradiction
assumptions_imply_false =
Engine.check(Map.put(assumptions, var, true)) == :contradiction
cond do
assumptions_imply_true and assumptions_imply_false ->
Store.false_node_id()
assumptions_imply_true ->
do_check_emptiness(
y,
Enum.sort(Map.put(assumptions, var, true)),
new_context
)
assumptions_imply_false ->
do_check_emptiness(
n,
Enum.sort(Map.put(assumptions, var, false)),
new_context
)
true ->
s_y =
do_check_emptiness(
y,
Enum.sort(Map.put(assumptions, var, true)),
new_context
)
s_n =
do_check_emptiness(
n,
Enum.sort(Map.put(assumptions, var, false)),
new_context
)
s_d =
do_check_emptiness(
d,
Enum.sort(Map.put(assumptions, var, :dc)),
new_context
)
Store.find_or_create_node(var, s_y, s_n, s_d)
end
end
end
end
end
end
end
@doc """
Handles recursive simplification by setting up a sub-problem.
It gathers all assumptions related to a sub-component (like the head or tail),
unwraps them, and uses them to simplify the TDD of the sub-component's type.
"""
defp handle_recursive_subproblem(
algo_type,
sub_key,
constraint_spec, # A TypeSpec carried by the TDD variable (not a node id or predicate var).
node_details,
sorted_assumptions,
context
) do
{_var, y, n, _d} = node_details
assumptions = Map.new(sorted_assumptions)
# 1. Partition assumptions to get those relevant to the sub-problem.
{sub_assumptions_raw, other_assumptions_list} =
Enum.split_with(assumptions, fn {v, _} ->
(Tdd.Predicate.Info.get_traits(v) || %{})[:sub_key] == sub_key
end)
# 2. Reconstruct the sub-spec from the relevant assumptions.
# First, remap the keys from their scoped form to their base form.
sub_assumptions_map =
Map.new(sub_assumptions_raw, fn
{{4, :b_element, _index, nested_var}, val} -> {nested_var, val}
{{_cat, _pred, nested_var, _}, val} -> {nested_var, val}
end)
reconstructed_sub_spec = Tdd.TypeReconstructor.spec_from_assumptions(sub_assumptions_map)
# 3. Check if the reconstructed spec is a subtype of the required constraint spec.
# The `constraint_spec` is passed in directly from the TDD variable.
is_sub = Tdd.Compiler.is_subtype_with_context(reconstructed_sub_spec, constraint_spec, context)
# 4. Branch accordingly.
if is_sub do
# The constraint is satisfied. Predicate is 'true'. Follow YES.
case algo_type do
:simplify -> do_simplify(y, sorted_assumptions, context)
:check_emptiness -> do_check_emptiness(y, sorted_assumptions, context)
end
else
# The constraint is violated. Predicate is 'false'. Follow NO.
case algo_type do
:simplify -> do_simplify(n, Enum.sort(other_assumptions_list), context)
:check_emptiness -> do_check_emptiness(n, Enum.sort(other_assumptions_list), context)
end
end
end
end
defmodule Tdd.TypeReconstructor do
@moduledoc """
Reconstructs a high-level `TypeSpec` from a low-level assumption map.
This module performs the inverse operation of the TDD compiler. It takes a
set of predicate assumptions (e.g., from a path in a TDD) and synthesizes
the most specific `TypeSpec` that satisfies all of those assumptions.
"""
use Tdd.Debug
alias Tdd.TypeSpec
alias Tdd.Predicate.Info
alias Tdd.Variable
@doc """
Takes a map of `{variable, boolean}` assumptions and returns a `TypeSpec`.
"""
@spec spec_from_assumptions(map()) :: TypeSpec.t()
def spec_from_assumptions(assumptions) do
# 1. Partition assumptions into groups for the top-level entity and its sub-components.
partitions =
Enum.group_by(assumptions, fn {var, _val} ->
case Info.get_traits(var) do
# :head or :tail
%{type: :list_recursive, sub_key: key} -> key
# {:elem, index}
%{type: :tuple_recursive, sub_key: key} -> key
# All other predicates apply to the top-level entity
_ -> :top_level
end
end)
# 2. Reconstruct the spec for the top-level entity from its flat assumptions.
top_level_assumptions = Map.get(partitions, :top_level, []) |> Map.new()
top_level_spec = spec_from_flat_assumptions(top_level_assumptions)
# 2.5 Get implied base specs
implied_base_specs =
partitions
|> Map.keys()
|> Enum.map(fn
key when key in [:head, :tail] -> :list
{:elem, _} -> :tuple
# Other keys don't imply a base type
_ -> nil
end)
|> Enum.reject(&is_nil/1)
# 3. Recursively reconstruct specs for all sub-problems (head, tail, elements).
sub_problem_specs =
partitions
|> Map.drop([:top_level])
|> Enum.map(fn {sub_key, sub_assumptions_list} ->
# Re-map the nested variables back to their base form for the recursive call.
# e.g., {{5, :c_head, NESTED_VAR, _}, val} -> {NESTED_VAR, val}
remapped_assumptions =
sub_assumptions_list
|> Map.new(fn {var, val} ->
# This pattern matching is a bit simplified for clarity
{_cat, _pred, nested_var_or_idx, maybe_nested_var} = var
nested_var =
if is_nil(maybe_nested_var), do: nested_var_or_idx, else: maybe_nested_var
{nested_var, val}
end)
# Recursively build the spec for the sub-problem
sub_spec = spec_from_assumptions(remapped_assumptions)
# Wrap it in a constructor that describes its relationship to the parent
case sub_key do
# Partial spec: just describes the head
:head ->
{:cons, sub_spec, :any}
# Partial spec: just describes the tail
:tail ->
{:cons, :any, sub_spec}
          {:elem, _index} ->
            # Ideally this would be a sparse tuple spec, e.g. {any, any, <sub_spec>, any},
            # but that requires knowing the tuple's size. For now we return an
            # under-specified tuple spec, which is good enough for the demo.
{:tuple, [sub_spec]}
end
end)
# 4. The final spec is the intersection of the top-level spec and all sub-problem specs.
final_spec_list = [top_level_spec | sub_problem_specs ++ implied_base_specs]
TypeSpec.normalize({:intersect, final_spec_list})
end
@doc "Handles only the 'flat' (non-recursive) assumptions for a single entity."
defp spec_from_flat_assumptions(assumptions) do
specs =
Enum.map(assumptions, fn {var, bool_val} ->
# Convert each assumption into a `TypeSpec`.
# A `true` assumption means the type is `X`.
# A `false` assumption means the type is `¬X`.
spec =
case var do
{0, :is_atom, _, _} -> :atom
{0, :is_integer, _, _} -> :integer
{0, :is_list, _, _} -> :list
{0, :is_tuple, _, _} -> :tuple
{1, :value, val, _} -> {:literal, val}
# For integer properties, we create a range spec. This part could be more detailed.
# x < n
{2, :alt, n, _} -> {:integer_range, :neg_inf, n - 1}
{2, :beq, n, _} -> {:literal, n}
# x > n
{2, :cgt, n, _} -> {:integer_range, n + 1, :pos_inf}
# Simplified for now
{4, :a_size, _, _} -> :tuple
{5, :b_is_empty, _, _} -> {:literal, []}
            # Reconstructing a list_of spec from the ambient element predicate would
            # use the (currently disabled) clause below:
# {5, :a_all_elements, element_spec, _} ->
# {:list_of, element_spec}
# Ignore recursive and ambient vars at this flat level
_ -> :any
end
if bool_val, do: spec, else: {:negation, spec}
end)
# The result is the intersection of all the individual specs.
TypeSpec.normalize({:intersect, specs})
end
end
defmodule Tdd.Compiler do
# use Tdd.Debug # Keep if you use it
@moduledoc "Compiles a `TypeSpec` into a canonical TDD ID."
alias Tdd.TypeSpec
alias Tdd.Variable
alias Tdd.Store
alias Tdd.Algo
alias Tdd.Debug
@doc """
The main public entry point. Takes a spec and returns its TDD ID.
"""
@spec spec_to_id(TypeSpec.t()) :: non_neg_integer()
def spec_to_id(spec) do
normalized_spec = TypeSpec.normalize(spec)
# Context maps canonical var names from :mu (e.g. :m_var0) to placeholder TDD IDs.
compile_normalized_spec(normalized_spec, %{})
end
# Internal function that expects a NORMALIZED spec.
# Handles caching, :mu binding, and :type_var resolution.
defp compile_normalized_spec(normalized_spec, context) do
# Cache key uses the normalized spec. Context is implicitly handled because
# type variables within normalized_spec are already canonical.
# If normalized_spec is a :type_var, its resolution depends on context,
# so it shouldn't hit the main cache directly if context is non-empty.
cache_key = {:spec_to_id, normalized_spec}
case normalized_spec do
{:type_var, var_name} ->
case Map.get(context, var_name) do
nil ->
raise "Tdd.Compiler: Unbound type variable during TDD compilation: #{inspect(var_name)}. Full spec: #{inspect(normalized_spec)}. Context: #{inspect(context)}"
placeholder_id when is_integer(placeholder_id) ->
# This is a placeholder ID for a mu-bound variable
placeholder_id
end
# Not a top-level type variable, proceed with caching/compilation
_other_form ->
case Store.get_op_cache(cache_key) do
{:ok, id} ->
id
:not_found ->
id_to_cache =
case normalized_spec do
{:mu, var_name, body_spec} ->
# var_name is canonical, e.g., :m_var0
# Create a placeholder for this specific var_name's recursive usage.
# The placeholder itself needs a unique ID in the TDD store.
                  # Using {:mu_placeholder_for_var, var_name} as the *variable* for the placeholder node.
placeholder_node_variable_tag = {:mu_placeholder_for_var, var_name}
placeholder_id = Store.create_placeholder(placeholder_node_variable_tag)
new_context = Map.put(context, var_name, placeholder_id)
# Recursively compile the body. body_spec is already normalized.
compiled_body_id = compile_normalized_spec(body_spec, new_context)
# Knot-tying:
# The TDD graph rooted at `compiled_body_id` contains `placeholder_id` at recursive points.
# We want a new graph where these `placeholder_id`s now point to `compiled_body_id` itself.
# The result of this substitution is the TDD for the mu-type.
#
# If `update_node_in_place` is the chosen mechanism:
# Store.update_node_in_place(placeholder_id, Store.get_node(compiled_body_id))
# final_id = placeholder_id # The placeholder ID becomes the ID of the mu-type
#
# If `Algo.substitute` is used (as in current `compile_recursive_spec`):
# This creates a new graph where occurrences of `placeholder_id` in `compiled_body_id`'s graph
# are replaced by `compiled_body_id` itself.
# The root of the mu-type's TDD is this new graph.
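                  # For example, the normalized list_of(integer) spec
                  #   {:mu, :m_var0, {:union, [{:literal, []}, {:cons, :integer, {:type_var, :m_var0}}]}}
                  # compiles its body with :m_var0 bound to placeholder_id; the substitute
                  # call below then redirects every occurrence of the placeholder to the
                  # body's own root, yielding the cyclic TDD for the mu-type.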
final_id = Algo.substitute(compiled_body_id, placeholder_id, compiled_body_id)
Algo.simplify(final_id)
# Atomic types, literals, and compound types (union, intersect, etc.)
other ->
Tdd.Debug.log(other, "compile_normalized_spec")
# Pass context for children's compilation
raw_id = do_structural_compile(normalized_spec, context)
Algo.simplify(raw_id)
end
Store.put_op_cache(cache_key, id_to_cache)
id_to_cache
end
end
end
# This function compiles the actual structure of a normalized spec.
# It assumes `structural_spec` is NOT a top-level :mu or :type_var (those are handled by compile_normalized_spec).
# It calls `compile_normalized_spec` for its children to ensure they are properly cached and mu/type_var handled.
defp do_structural_compile(structural_spec, context) do
case structural_spec do
:any ->
Store.true_node_id()
:none ->
Store.false_node_id()
:atom ->
create_base_type_tdd(Variable.v_is_atom())
:integer ->
create_base_type_tdd(Variable.v_is_integer())
:list ->
create_base_type_tdd(Variable.v_is_list())
:tuple ->
create_base_type_tdd(Variable.v_is_tuple())
{:literal, val} when is_atom(val) ->
compile_value_equality(:atom, Variable.v_atom_eq(val), context)
{:literal, val} when is_integer(val) ->
compile_value_equality(:integer, Variable.v_int_eq(val), context)
# Empty list
{:literal, []} ->
compile_value_equality(:list, Variable.v_list_is_empty(), context)
# Add other literal types if necessary, e.g. literal tuples
{:integer_range, min, max} ->
compile_integer_range(min, max, context)
      {:union, specs} ->
        specs
        |> Enum.map(&compile_normalized_spec(&1, context))
        |> Enum.reduce(Store.false_node_id(), fn id, acc ->
          Algo.apply(:sum, &op_union_terminals/2, id, acc)
        end)
      {:intersect, specs} ->
        specs
        |> Enum.map(&compile_normalized_spec(&1, context))
        |> Enum.reduce(Store.true_node_id(), fn id, acc ->
          Algo.apply(:intersect, &op_intersect_terminals/2, id, acc)
        end)
{:negation, sub_spec} ->
Algo.negate(compile_normalized_spec(sub_spec, context))
{:cons, head_spec, tail_spec} ->
# The logic is now much more direct. We don't need a helper.
id_list = compile_normalized_spec(:list, context)
id_is_empty = create_base_type_tdd(Variable.v_list_is_empty())
id_not_is_empty = Algo.negate(id_is_empty)
non_empty_list_id =
Algo.apply(:intersect, &op_intersect_terminals/2, id_list, id_not_is_empty)
# Embed the SPECS directly into the variables.
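        # For example, for {:cons, :atom, :list} the head-checker variable carries the
        # spec :atom itself (roughly {5, :c_head, :atom, nil}, assuming the constructor
        # simply embeds its argument); the simplify/emptiness handlers match on this shape.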
head_checker_var = Variable.v_list_head_pred(head_spec)
head_checker_tdd = create_base_type_tdd(head_checker_var)
tail_checker_var = Variable.v_list_tail_pred(tail_spec)
tail_checker_tdd = create_base_type_tdd(tail_checker_var)
[non_empty_list_id, head_checker_tdd, tail_checker_tdd]
|> Enum.reduce(compile_normalized_spec(:any, context), fn id, acc ->
Algo.apply(:intersect, &op_intersect_terminals/2, id, acc)
end)
{:tuple, elements_specs} ->
# Renamed for clarity
compile_tuple_from_elements(elements_specs, context)
# These should not be encountered here if TypeSpec.normalize worked correctly for ground types.
{:type_lambda, _, _} ->
raise "Tdd.Compiler: Cannot compile :type_lambda directly. Spec should be ground. Spec: #{inspect(structural_spec)}"
{:type_apply, _, _} ->
raise "Tdd.Compiler: Cannot compile :type_apply directly. Spec should be ground and fully beta-reduced. Spec: #{inspect(structural_spec)}"
# :mu and :type_var are handled by compile_normalized_spec directly.
# :list_of is normalized to :mu by TypeSpec.normalize.
# Catch-all for unexpected specs at this stage
_ ->
raise "Tdd.Compiler.do_structural_compile: Unhandled structural spec form: #{inspect(structural_spec)}"
end
end
# --- Private Helper Functions (mostly from old do_spec_to_id) ---
# Ensure they use `compile_normalized_spec` for sub-specs and pass `context`.
defp create_base_type_tdd(var),
do:
Store.find_or_create_node(
var,
Store.true_node_id(),
Store.false_node_id(),
Store.false_node_id()
)
defp compile_value_equality(base_type_spec, value_var, context) do
eq_node = create_base_type_tdd(value_var)
# base_type_spec is an atom like :atom, :integer. It needs to be compiled.
base_node_id = compile_normalized_spec(base_type_spec, context)
Algo.apply(:intersect, &op_intersect_terminals/2, base_node_id, eq_node)
end
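  # compile_integer_range encodes a bounded range as integer ∧ ¬(x < min) ∧ (x < max + 1).
  # For example, {:integer_range, 5, 10} compiles to integer ∧ ¬(x < 5) ∧ (x < 11); an
  # unbounded side simply drops the corresponding conjunct.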
defp compile_integer_range(min, max, context) do
# Ensure :integer is compiled via main path
base_id = compile_normalized_spec(:integer, context)
lt_min_tdd = if min != :neg_inf, do: create_base_type_tdd(Variable.v_int_lt(min))
# Use :any for unbounded
gte_min_tdd =
if lt_min_tdd, do: Algo.negate(lt_min_tdd), else: compile_normalized_spec(:any, context)
id_with_min = Algo.apply(:intersect, &op_intersect_terminals/2, base_id, gte_min_tdd)
if max == :pos_inf do
id_with_min
else
# x <= max is equivalent to x < max + 1
lt_max_plus_1_tdd = create_base_type_tdd(Variable.v_int_lt(max + 1))
Algo.apply(:intersect, &op_intersect_terminals/2, id_with_min, lt_max_plus_1_tdd)
end
end
defp compile_cons_from_ids(h_id, t_id, context) do
# TDD for `is_list & !is_empty`
# Ensures :list is properly compiled
id_list = compile_normalized_spec(:list, context)
id_is_empty = create_base_type_tdd(Variable.v_list_is_empty())
id_not_is_empty = Algo.negate(id_is_empty)
non_empty_list_id =
Algo.apply(:intersect, &op_intersect_terminals/2, id_list, id_not_is_empty)
head_checker = sub_problem(&Variable.v_list_head_pred/1, h_id, context)
tail_checker = sub_problem(&Variable.v_list_tail_pred/1, t_id, context)
[non_empty_list_id, head_checker, tail_checker]
# Start intersection with :any
|> Enum.reduce(compile_normalized_spec(:any, context), fn id, acc ->
Algo.apply(:intersect, &op_intersect_terminals/2, id, acc)
end)
end
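  # compile_tuple_from_elements builds is_tuple ∧ size == n ∧ elem_0 ∧ ... ∧ elem_{n-1},
  # where each element-checker variable embeds the element's spec directly (as with
  # :cons heads and tails above). For example, {:tuple, [:atom, :integer]} intersects
  # is_tuple, size == 2, and two element checkers carrying :atom and :integer.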
defp compile_tuple_from_elements(elements_specs, context) do
size = length(elements_specs)
base_id = compile_normalized_spec(:tuple, context)
size_tdd = create_base_type_tdd(Variable.v_tuple_size_eq(size))
initial_id = Algo.apply(:intersect, &op_intersect_terminals/2, base_id, size_tdd)
elements_specs
|> Enum.with_index()
|> Enum.reduce(initial_id, fn {elem_spec, index}, acc_id ->
# Embed the element's SPEC directly into the variable.
elem_checker_var = Variable.v_tuple_elem_pred(index, elem_spec)
elem_checker_tdd = create_base_type_tdd(elem_checker_var)
Algo.apply(:intersect, &op_intersect_terminals/2, acc_id, elem_checker_tdd)
end)
end
# sub_problem and do_sub_problem remain largely the same,
# but ensure `spec_to_id` called within `do_sub_problem` (for placeholder case)
# is `compile_normalized_spec`.
defp do_sub_problem(sub_key_constructor, tdd_id, context) do
cond do
# Should not happen with valid placeholder IDs
tdd_id < 0 ->
Store.false_node_id()
true ->
case Store.get_node(tdd_id) do
{:ok, :true_terminal} ->
Store.true_node_id()
{:ok, :false_terminal} ->
Store.false_node_id()
{:ok, {{:mu_placeholder_for_var, var_name_in_placeholder}, _, _, _}} ->
# This is a placeholder node encountered during sub_problem traversal.
# This means `tdd_id` is a `placeholder_id` for some `var_name`.
# We are creating a TDD for `sub_key_constructor(var_name = placeholder_id)`.
# This path is tricky. The `sub_problem` is meant to PUSH constraints inwards.
# Example: list_of(X) -> cons(X, list_of(X)).
# When compiling `list_of(X)`, head is `X`. `sub_problem(v_list_head_pred, compile(X), ...)`
# If `X` itself is `mu Y. ...`, then `compile(X)` becomes `P_Y`.
# So `sub_problem(v_list_head_pred, P_Y, ...)`
# Inside `do_sub_problem`, `tdd_id` is `P_Y`. Its node details are `mu_placeholder_for_var, Y`.
# This indicates that the sub-problem applies to whatever `Y` resolves to.
# This branch in do_sub_problem seems to handle if the *tdd_id itself* is a placeholder.
# The original logic for `{{:placeholder, spec}, _, _, _}`:
# dummy_var = sub_key_constructor.(:dummy)
# case dummy_var do ... uses spec ...
# This `spec` was the spec of the list_of/tuple that the placeholder represented.
# Now, placeholder is for `var_name`. `spec` is not directly available here.
# This part of `do_sub_problem` might need adjustment or may become less relevant
# if placeholders are simpler.
# For now, let's assume `tdd_id` passed to sub_problem is a fully compiled TDD (or terminal).
# If `tdd_id` *is* a placeholder ID, `Store.get_node(tdd_id)` will return its placeholder structure.
# The original `compile_recursive_spec`'s `substitute` call handles the cycles.
# `sub_problem` should operate on the structure of `tdd_id` *after* it's potentially resolved.
# The `substitute` call makes the graph cyclic. `sub_problem` might then traverse this cycle.
# `Algo.simplify`'s coinductive handling should manage this.
# This specific case `{{:mu_placeholder_for_var, ...}}` means tdd_id *is* a raw placeholder,
# which shouldn't happen if `substitute` was called correctly to make it point to a real structure.
# This suggests that `sub_problem` should perhaps not be called with raw placeholder_ids.
#
# Re-thinking: `sub_problem` constructs `Var(SubTDD)`.
# `Var` is `v_list_head_pred(InnerVar)`. `SubTDD` is the TDD for the head type.
# `sub_problem` is essentially `map(tdd_id, fn node_var -> sub_key_constructor.(node_var))`.
# If `tdd_id` is `P_Y`, then we are making `sub_key_constructor(P_Y_var)`.
# This seems correct. The placeholder variable in the store is `({:mu_placeholder_for_var, var_name}, 1,0,0)`.
# So `var` below would be `{:mu_placeholder_for_var, var_name_in_placeholder}`.
raise "Tdd.Compiler.do_sub_problem: Encountered raw placeholder node details. This path needs review. Details: #{inspect(Store.get_node(tdd_id))}"
# Normal node
{:ok, {var, y, n, d}} ->
Store.find_or_create_node(
sub_key_constructor.(var),
# Pass context
sub_problem(sub_key_constructor, y, context),
# Pass context
sub_problem(sub_key_constructor, n, context),
# Pass context
sub_problem(sub_key_constructor, d, context)
)
{:error, reason} ->
raise "Tdd.Compiler.do_sub_problem: Error getting node for ID #{tdd_id}: #{reason}"
end
end
end
# Memoization wrapper for sub_problem
defp sub_problem(sub_key_constructor, tdd_id, context) do
# Context is not part of cache key for sub_problem, as its effect is on how tdd_id was built.
# The structure of tdd_id is what matters for sub_problem's transformation.
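    # For example, with &Variable.v_list_head_pred/1 the key becomes
    # {:sub_problem, "c_head", tdd_id}: the constructor is applied to a dummy value
    # purely to extract its predicate tag for the cache key.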
cache_key =
{:sub_problem, Atom.to_string(elem(sub_key_constructor.(:dummy_var_for_cache_key), 1)),
tdd_id}
case Store.get_op_cache(cache_key) do
{:ok, result_id} ->
result_id
:not_found ->
result_id = do_sub_problem(sub_key_constructor, tdd_id, context)
Store.put_op_cache(cache_key, result_id)
result_id
end
end
# --- Terminal Logic Helpers (unchanged) ---
defp op_union_terminals(:true_terminal, _), do: :true_terminal
defp op_union_terminals(_, :true_terminal), do: :true_terminal
defp op_union_terminals(t, :false_terminal), do: t
defp op_union_terminals(:false_terminal, t), do: t
defp op_intersect_terminals(:false_terminal, _), do: :false_terminal
defp op_intersect_terminals(_, :false_terminal), do: :false_terminal
defp op_intersect_terminals(t, :true_terminal), do: t
defp op_intersect_terminals(:true_terminal, t), do: t
# --- Public Subtyping Check (from CompilerAlgoTests) ---
@doc "Checks if spec1 is a subtype of spec2 using TDDs."
@doc """
Internal subtype check that carries the compilation context for mu-variables.
"""
# def is_subtype_with_context(spec1, spec2, context) do
# # We don't need the full debug logging here, it's an internal call.
# id1 = compile_normalized_spec(spec1, context)
# id2 = compile_normalized_spec(spec2, context)
#
# neg_id2 = Algo.negate(id2)
# intersect_id = Algo.apply(:intersect, &op_intersect_terminals/2, id1, neg_id2)
# final_id = Algo.check_emptiness(intersect_id)
#
# final_id == Store.false_node_id()
# end
# Make the public function use this new internal one with an empty context.
def is_subtype(spec1, spec2) do
Debug.log({spec1, spec2}, "Compiler.is_subtype/2: INPUTS")
# Normalize specs UPFRONT
norm_s1 = Tdd.TypeSpec.normalize(spec1)
norm_s2 = Tdd.TypeSpec.normalize(spec2)
# Start the check with a fresh context.
is_subtype_with_context(norm_s1, norm_s2, %{})
end
  @doc """
  Internal subtype check that carries the compilation context for mu-variables.
  """
  def is_subtype_with_context(spec1, spec2, context) do
id1 = compile_normalized_spec(spec1, context)
id2 = compile_normalized_spec(spec2, context)
neg_id2 = Algo.negate(id2)
intersect_id = Algo.apply(:intersect, &op_intersect_terminals/2, id1, neg_id2)
final_id = Algo.check_emptiness(intersect_id)
final_id == Store.false_node_id()
end
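  # Usage (mirroring CompilerAlgoTests below):
  #   Tdd.Compiler.is_subtype({:literal, :foo}, :atom)  #=> true
  #   Tdd.Compiler.is_subtype(:atom, {:literal, :foo})  #=> false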
  # Older debug-logging variant of is_subtype/2, superseded by the clause above:
  # def is_subtype(spec1, spec2) do
  #   Debug.log({spec1, spec2}, "Compiler.is_subtype/2: INPUTS")
  #   id1 =
  #     spec_to_id(spec1)
  #     |> Debug.log("Compiler.is_subtype/2: id1 (for spec1)")
  #   id2 =
  #     spec_to_id(spec2)
  #     |> Debug.log("Compiler.is_subtype/2: id2 (for spec2)")
  #   neg_id2 =
  #     Algo.negate(id2)
  #     |> Debug.log("Compiler.is_subtype/2: neg_id2")
  #   intersect_id =
  #     Algo.apply(:intersect, &op_intersect_terminals/2, id1, neg_id2)
  #     |> Debug.log("Compiler.is_subtype/2: intersect_id (id1 & ~id2)")
  #   final_id =
  #     Algo.check_emptiness(intersect_id)
  #     |> Debug.log("Compiler.is_subtype/2: final_id (after check_emptiness)")
  #   result =
  #     (final_id == Store.false_node_id())
  #     |> Debug.log("Compiler.is_subtype/2: FINAL RESULT (is final_id == 0?)")
  #   result
  # end
# @spec is_subtype(TypeSpec.t(), TypeSpec.t()) :: boolean
# def is_subtype(spec1, spec2) do
# id1 = spec_to_id(spec1)
# id2 = spec_to_id(spec2)
# # A <: B <=> A & ~B == none
# neg_id2 = Algo.negate(id2)
# intersect_id = Algo.apply(:intersect, &op_intersect_terminals/2, id1, neg_id2)
# # Simplify is crucial for coinductive reasoning with recursive types
# final_id = Algo.simplify(intersect_id)
# final_id == Store.false_node_id()
# end
end
####
# Tests
####
defmodule TddStoreTests do
def test(name, expected, result) do
if expected == result do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
def run() do
IO.puts("\n--- Running Tdd.Store Tests ---")
Process.put(:test_failures, [])
# --- Test Setup ---
Tdd.Store.init()
# --- Test Cases ---
IO.puts("\n--- Section: Initialization and Terminals ---")
test("true_node_id returns 1", 1, Tdd.Store.true_node_id())
test("false_node_id returns 0", 0, Tdd.Store.false_node_id())
test("get_node for ID 1 returns true_terminal", {:ok, :true_terminal}, Tdd.Store.get_node(1))
test(
"get_node for ID 0 returns false_terminal",
{:ok, :false_terminal},
Tdd.Store.get_node(0)
)
test(
"get_node for unknown ID returns not_found",
{:error, :not_found},
Tdd.Store.get_node(99)
)
IO.puts("\n--- Section: Node Creation and Structural Sharing ---")
# Define some opaque variables
var_a = {:is_atom}
var_b = {:is_integer}
true_id = Tdd.Store.true_node_id()
false_id = Tdd.Store.false_node_id()
# Create a new node. It should get ID 2.
id1 = Tdd.Store.find_or_create_node(var_a, true_id, false_id, false_id)
test("First created node gets ID 2", 2, id1)
# Verify its content
test(
"get_node for ID 2 returns the correct tuple",
{:ok, {var_a, true_id, false_id, false_id}},
Tdd.Store.get_node(id1)
)
# Create another, different node. It should get ID 3.
id2 = Tdd.Store.find_or_create_node(var_b, id1, false_id, false_id)
test("Second created node gets ID 3", 3, id2)
# Attempt to create the first node again.
id1_again = Tdd.Store.find_or_create_node(var_a, true_id, false_id, false_id)
test(
"Attempting to create an existing node returns the same ID (Structural Sharing)",
id1,
id1_again
)
# Check that next_id was not incremented by the shared call
id3 = Tdd.Store.find_or_create_node(var_b, true_id, false_id, false_id)
test("Next new node gets the correct ID (4)", 4, id3)
IO.puts("\n--- Section: Basic Reduction Rule ---")
# Create a node where all children are the same.
id_redundant = Tdd.Store.find_or_create_node(var_a, id3, id3, id3)
test(
"A node with identical children reduces to the child's ID",
id3,
id_redundant
)
IO.puts("\n--- Section: Caching ---")
cache_key = {:my_op, 1, 2}
test("Cache is initially empty for a key", :not_found, Tdd.Store.get_op_cache(cache_key))
Tdd.Store.put_op_cache(cache_key, :my_result)
test(
"Cache returns the stored value after put",
{:ok, :my_result},
Tdd.Store.get_op_cache(cache_key)
)
Tdd.Store.put_op_cache(cache_key, :new_result)
test("Cache can be updated", {:ok, :new_result}, Tdd.Store.get_op_cache(cache_key))
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.Store tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule TypeSpecTests do
alias Tdd.TypeSpec
# Simple test helper function
defp test(name, expected, tested) do
current_failures = Process.get(:test_failures, [])
result = TypeSpec.normalize(tested)
# Use a custom comparison to handle potentially unsorted lists in expected values
# The normalize function *should* sort, but this makes tests more robust.
is_equal =
case {expected, result} do
{{:union, list1}, {:union, list2}} -> Enum.sort(list1) == Enum.sort(list2)
{{:intersect, list1}, {:intersect, list2}} -> Enum.sort(list1) == Enum.sort(list2)
_ -> expected == result
end
if is_equal do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" tested: #{inspect(tested)}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | current_failures])
end
end
def run() do
IO.puts("\n--- Running Tdd.TypeSpec.normalize/1 Tests ---")
Process.put(:test_failures, [])
# --- Test Section: Base & Simple Types ---
IO.puts("\n--- Section: Base & Simple Types ---")
test("Normalizing :any is idempotent", :any, :any)
test("Normalizing :none is idempotent", :none, :none)
test("Normalizing :atom is idempotent", :atom, :atom)
test("Normalizing a literal is idempotent", {:literal, :foo}, {:literal, :foo})
# --- Test Section: Double Negation ---
IO.puts("\n--- Section: Double Negation ---")
test("¬(¬atom) simplifies to atom", :atom, {:negation, {:negation, :atom}})
test("A single negation is preserved", {:negation, :integer}, {:negation, :integer})
test(
"¬(¬(¬atom)) simplifies to ¬atom",
{:negation, :atom},
{:negation, {:negation, {:negation, :atom}}}
)
# --- Test Section: Union Normalization ---
IO.puts("\n--- Section: Union Normalization ---")
test(
"Flattens nested unions",
{:union, [:atom, :integer, :list]},
{:union, [:integer, {:union, [:list, :atom]}]}
)
test(
"Sorts members of a union",
{:union, [:atom, :integer, :list]},
{:union, [:list, :integer, :atom]}
)
test(
"Removes duplicates in a union",
{:union, [:atom, :integer]},
{:union, [:integer, :atom, :integer]}
)
test("Simplifies a union with :none (A | none -> A)", :atom, {:union, [:atom, :none]})
test("Simplifies a union with :any (A | any -> any)", :any, {:union, [:atom, :any]})
test("An empty union simplifies to :none", :none, {:union, []})
test("A union containing only :none simplifies to :none", :none, {:union, [:none, :none]})
test("A union of a single element simplifies to the element itself", :atom, {:union, [:atom]})
# --- Test Section: Intersection Normalization ---
IO.puts("\n--- Section: Intersection Normalization ---")
test(
"Flattens nested intersections",
{:intersect, [:atom, :integer, :list]},
{:intersect, [:integer, {:intersect, [:list, :atom]}]}
)
test(
"Sorts members of an intersection",
{:intersect, [:atom, :integer, :list]},
{:intersect, [:list, :integer, :atom]}
)
test(
"Removes duplicates in an intersection",
{:intersect, [:atom, :integer]},
{:intersect, [:integer, :atom, :integer]}
)
test(
"Simplifies an intersection with :any (A & any -> A)",
:atom,
{:intersect, [:atom, :any]}
)
test(
"Simplifies an intersection with :none (A & none -> none)",
:none,
{:intersect, [:atom, :none]}
)
test("An empty intersection simplifies to :any", :any, {:intersect, []})
test(
"An intersection of a single element simplifies to the element itself",
:atom,
{:intersect, [:atom]}
)
# --- Test Section: Recursive Normalization ---
IO.puts("\n--- Section: Recursive Normalization ---")
test(
"Recursively normalizes elements in a tuple",
{:tuple, [:atom, {:union, [{:literal, :a}, {:literal, :b}]}]},
{:tuple, [{:union, [:atom]}, {:union, [{:literal, :a}, {:literal, :b}]}]}
)
test(
"Recursively normalizes head and tail in a cons",
{:cons, :any, {:negation, :integer}},
{:cons, {:union, [:atom, :any]}, {:negation, {:union, [:integer]}}}
)
test(
"Recursively normalizes element in list_of",
{:mu, :m_var0, {:union, [{:literal, []}, {:cons, :list, {:type_var, :m_var0}}]}},
{:list_of, {:intersect, [:any, :list]}}
)
test(
"Recursively normalizes sub-spec in negation",
{:negation, {:union, [{:literal, :a}, {:literal, :b}]}},
{:negation, {:union, [{:literal, :a}, {:literal, :b}]}}
)
# --- Test Section: Complex Nested Cases ---
IO.puts("\n--- Section: Complex Nested Cases ---")
complex_spec =
{:union,
[
:atom,
# simplifies to :integer
{:intersect, [:any, :integer, {:intersect, [:integer]}]},
# simplifies to :list
{:union, [:none, :list]}
]}
test(
"Handles complex nested simplifications correctly",
{:union, [:atom, :integer, :list]},
complex_spec
)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All TypeSpec tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures:")
Enum.each(failures, &IO.puts(" - #{&1}"))
end
end
end
defmodule TddVariableTests do
alias Tdd.Variable
alias Tdd.TypeSpec
# Simple test helper function
defp test(name, expected, result) do
current_failures = Process.get(:test_failures, [])
if expected == result do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | current_failures])
end
end
def run() do
IO.puts("\n--- Running Tdd.Variable Tests ---")
Process.put(:test_failures, [])
# --- Test Section: Variable Structure ---
IO.puts("\n--- Section: Variable Structure ---")
test("v_is_atom returns correct tuple", {0, :is_atom, nil, nil}, Variable.v_is_atom())
test("v_atom_eq returns correct tuple", {1, :value, :foo, nil}, Variable.v_atom_eq(:foo))
test("v_int_lt returns correct tuple", {2, :alt, 10, nil}, Variable.v_int_lt(10))
test(
"v_tuple_size_eq returns correct tuple",
{4, :a_size, 2, nil},
Variable.v_tuple_size_eq(2)
)
test(
"v_tuple_elem_pred nests a variable correctly",
{4, :b_element, 0, {0, :is_integer, nil, nil}},
Variable.v_tuple_elem_pred(0, Variable.v_is_integer())
)
test(
"v_list_is_empty returns correct tuple",
{5, :b_is_empty, nil, nil},
Variable.v_list_is_empty()
)
test(
"v_list_head_pred nests a variable correctly",
{5, :c_head, {0, :is_atom, nil, nil}, nil},
Variable.v_list_head_pred(Variable.v_is_atom())
)
# test(
# "v_list_all_elements_are nests a TypeSpec correctly",
# {5, :a_all_elements, {:union, [:atom, :integer]}, nil},
# Variable.v_list_all_elements_are(TypeSpec.normalize({:union, [:integer, :atom]}))
# )
# --- Test Section: Global Ordering ---
IO.puts("\n--- Section: Global Ordering (Based on Elixir Term Comparison) ---")
# Category 0 < Category 1
test(
"Primary type var < Atom property var",
true,
Variable.v_is_tuple() < Variable.v_atom_eq(:anything)
)
# Within Category 2: alt < beq < cgt
test(
"Integer :lt var < Integer :eq var",
true,
Variable.v_int_lt(10) < Variable.v_int_eq(10)
)
test(
"Integer :eq var < Integer :gt var",
true,
Variable.v_int_eq(10) < Variable.v_int_gt(10)
)
# Within Category 2: comparison of value
test(
"Integer :eq(5) var < Integer :eq(10) var",
true,
Variable.v_int_eq(5) < Variable.v_int_eq(10)
)
# Within Category 4: comparison of index
test(
"Tuple elem(0) var < Tuple elem(1) var",
true,
Variable.v_tuple_elem_pred(0, Variable.v_is_atom()) <
Variable.v_tuple_elem_pred(1, Variable.v_is_atom())
)
# Within Category 4, same index: comparison of nested var
test(
"Tuple elem(0, atom) var < Tuple elem(0, int) var",
true,
Variable.v_tuple_elem_pred(0, Variable.v_is_atom()) <
Variable.v_tuple_elem_pred(0, Variable.v_is_integer())
)
# IO.inspect(Variable.v_list_all_elements_are(:atom),
# label: "Variable.v_list_all_elements_are(:atom)"
# )
IO.inspect(Variable.v_list_is_empty(), label: "Variable.v_list_is_empty()")
# test(
# "List :a_all_elements var < List :b_is_empty var",
# true,
# Variable.v_list_all_elements_are(:atom) < Variable.v_list_is_empty()
# )
test(
"List :b_is_empty var < List :c_head var",
true,
Variable.v_list_is_empty() < Variable.v_list_head_pred(Variable.v_is_atom())
)
test(
"List :c_head var < List :tail var",
true,
Variable.v_list_head_pred(Variable.v_is_atom()) <
Variable.v_list_tail_pred(Variable.v_is_atom())
)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.Variable tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule ConsistencyEngineTests do
alias Tdd.Consistency.Engine
alias Tdd.Variable
defp test(name, expected, assumptions_map) do
result = Engine.check(assumptions_map)
# ... test reporting logic ...
is_ok = expected == result
status = if is_ok, do: "[PASS]", else: "[FAIL]"
IO.puts("#{status} #{name}")
unless is_ok do
IO.puts(" Expected: #{inspect(expected)}, Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
def run() do
IO.puts("\n--- Running Tdd.Consistency.Engine Tests ---")
Process.put(:test_failures, [])
# --- Section: Basic & Implication Tests ---
IO.puts("\n--- Section: Basic & Implication Tests ---")
test("An empty assumption map is consistent", :consistent, %{})
test("A single valid assumption is consistent", :consistent, %{Variable.v_is_atom() => true})
test(
"An implied contradiction is caught by expander",
:contradiction,
%{Variable.v_atom_eq(:foo) => true, Variable.v_is_atom() => false}
)
test(
"Implication creates a consistent set",
:consistent,
# implies is_atom=true
%{Variable.v_atom_eq(:foo) => true}
)
# --- Section: Primary Type Exclusivity ---
IO.puts("\n--- Section: Primary Type Exclusivity ---")
test(
"Two primary types cannot both be true",
:contradiction,
%{Variable.v_is_atom() => true, Variable.v_is_integer() => true}
)
test(
"Two primary types implied to be true is a contradiction",
:contradiction,
%{Variable.v_atom_eq(:foo) => true, Variable.v_int_eq(5) => true}
)
test(
"One primary type true and another false is consistent",
:consistent,
%{Variable.v_is_atom() => true, Variable.v_is_integer() => false}
)
# --- Section: Atom Consistency ---
IO.puts("\n--- Section: Atom Consistency ---")
test(
"An atom cannot equal two different values",
:contradiction,
%{Variable.v_atom_eq(:foo) => true, Variable.v_atom_eq(:bar) => true}
)
test(
"An atom can equal one value",
:consistent,
%{Variable.v_atom_eq(:foo) => true}
)
# --- Section: List Flat Consistency ---
IO.puts("\n--- Section: List Flat Consistency ---")
test(
"A list cannot be empty and have a head property",
:contradiction,
%{
Variable.v_list_is_empty() => true,
Variable.v_list_head_pred(Variable.v_is_atom()) => true
}
)
test(
"A non-empty list can have a head property",
:consistent,
%{
Variable.v_list_is_empty() => false,
Variable.v_list_head_pred(Variable.v_is_atom()) => true
}
)
test(
"A non-empty list is implied by head property",
:consistent,
# implies is_empty=false
%{Variable.v_list_head_pred(Variable.v_is_atom()) => true}
)
# --- Section: Integer Consistency ---
IO.puts("\n--- Section: Integer Consistency ---")
test("int == 5 is consistent", :consistent, %{Variable.v_int_eq(5) => true})
test("int == 5 AND int == 10 is a contradiction", :contradiction, %{
Variable.v_int_eq(5) => true,
Variable.v_int_eq(10) => true
})
test("int < 10 AND int > 20 is a contradiction", :contradiction, %{
Variable.v_int_lt(10) => true,
Variable.v_int_gt(20) => true
})
test("int > 5 AND int < 4 is a contradiction", :contradiction, %{
Variable.v_int_gt(5) => true,
Variable.v_int_lt(4) => true
})
# 6
test("int > 5 AND int < 7 is consistent", :consistent, %{
Variable.v_int_gt(5) => true,
Variable.v_int_lt(7) => true
})
test("int == 5 AND int < 3 is a contradiction", :contradiction, %{
Variable.v_int_eq(5) => true,
Variable.v_int_lt(3) => true
})
test("int == 5 AND int > 10 is a contradiction", :contradiction, %{
Variable.v_int_eq(5) => true,
Variable.v_int_gt(10) => true
})
test("int == 5 AND int > 3 is consistent", :consistent, %{
Variable.v_int_eq(5) => true,
Variable.v_int_gt(3) => true
})
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Consistency.Engine tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule TddAlgoTests do
alias Tdd.Store
alias Tdd.Variable
alias Tdd.Algo
# We need this to create stable variables
alias Tdd.TypeSpec
# --- Test Helper ---
defp test(name, expected, result) do
# A simple equality test is sufficient here.
if expected == result do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
# Helper to pretty print a TDD for debugging
defp print_tdd(id, indent \\ 0) do
prefix = String.duplicate(" ", indent)
case Store.get_node(id) do
{:ok, details} ->
IO.puts("#{prefix}ID #{id}: #{inspect(details)}")
case details do
{_var, y, n, d} ->
IO.puts("#{prefix} Yes ->")
print_tdd(y, indent + 1)
IO.puts("#{prefix} No ->")
print_tdd(n, indent + 1)
IO.puts("#{prefix} DC ->")
print_tdd(d, indent + 1)
_ ->
:ok
end
{:error, reason} ->
IO.puts("#{prefix}ID #{id}: Error - #{reason}")
end
end
# --- Test Runner ---
def run() do
IO.puts("\n--- Running Tdd.Algo & Tdd.Consistency.Engine Tests ---")
Process.put(:test_failures, [])
# Setup: Initialize the store and define some basic TDDs using the new modules.
Store.init()
true_id = Store.true_node_id()
false_id = Store.false_node_id()
# --- Manually build some basic type TDDs for testing ---
# t_atom = if is_atom then true else false
t_atom = Store.find_or_create_node(Variable.v_is_atom(), true_id, false_id, false_id)
# t_int = if is_int then true else false
t_int = Store.find_or_create_node(Variable.v_is_integer(), true_id, false_id, false_id)
# t_foo = if is_atom then (if value == :foo then true else false) else false
foo_val_check =
Store.find_or_create_node(Variable.v_atom_eq(:foo), true_id, false_id, false_id)
t_foo = Store.find_or_create_node(Variable.v_is_atom(), foo_val_check, false_id, false_id)
# t_bar = if is_atom then (if value == :bar then true else false) else false
bar_val_check =
Store.find_or_create_node(Variable.v_atom_eq(:bar), true_id, false_id, false_id)
t_bar = Store.find_or_create_node(Variable.v_is_atom(), bar_val_check, false_id, false_id)
# --- Section: Negate Algorithm ---
IO.puts("\n--- Section: Algo.negate ---")
negated_true = Algo.negate(true_id)
test("negate(true) is false", false_id, negated_true)
negated_false = Algo.negate(false_id)
test("negate(false) is true", true_id, negated_false)
# Double negation should be identity
test("negate(negate(t_atom)) is t_atom", t_atom, Algo.negate(Algo.negate(t_atom)))
# --- Section: Apply Algorithm (Union & Intersection) ---
IO.puts("\n--- Section: Algo.apply (raw structural operations) ---")
op_sum = fn
:true_terminal, _ -> :true_terminal
_, :true_terminal -> :true_terminal
t, :false_terminal -> t
:false_terminal, t -> t
end
op_intersect = fn
:false_terminal, _ -> :false_terminal
_, :false_terminal -> :false_terminal
t, :true_terminal -> t
:true_terminal, t -> t
end
# atom | int
sum_atom_int = Algo.apply(:sum, op_sum, t_atom, t_int)
# The result should be a node that checks is_atom, then if false, checks is_int
# We expect a structure like: if is_atom -> true, else -> t_int
    expected_sum_structure_id =
      Store.find_or_create_node(Variable.v_is_atom(), true_id, t_int, t_int)
test("Structure of 'atom | int' is correct", expected_sum_structure_id, sum_atom_int)
# :foo & :bar (structurally, before simplification)
# It should build a tree that checks is_atom, then value==:foo, then value==:bar
# This will be complex, but the key is that it's NOT the false_id yet.
intersect_foo_bar_raw = Algo.apply(:intersect, op_intersect, t_foo, t_bar)
test(":foo & :bar (raw) is not the false node", false, intersect_foo_bar_raw == false_id)
# --- Section: Simplify Algorithm (Flat Types) ---
IO.puts("\n--- Section: Algo.simplify (with Consistency.Engine) ---")
# An impossible structure: node that requires a value to be an atom AND an integer
# This tests the `check_primary_exclusivity` rule.
contradictory_assumptions = %{Variable.v_is_atom() => true, Variable.v_is_integer() => true}
# Simplifying ANYTHING under contradictory assumptions should yield `false`.
simplified_under_contradiction = Algo.simplify(true_id, contradictory_assumptions)
test(
"Simplifying under contradictory assumptions (atom & int) results in false",
false_id,
simplified_under_contradiction
)
# Test implication: A property implies its primary type
# A value being `:foo` implies it is an atom.
assumptions_with_foo = %{Variable.v_atom_eq(:foo) => true}
# If we simplify t_int under this assumption, it should become false.
# The engine expands to `{is_atom: true, value==:foo: true}`. Then it sees that
# the t_int node's variable `is_integer` must be false (from exclusivity rule).
simplified_int_given_foo = Algo.simplify(t_int, assumptions_with_foo)
test(
"Simplifying 'integer' given 'value==:foo' results in false",
false_id,
simplified_int_given_foo
)
# Now, let's simplify the raw intersection of :foo and :bar
    _simplified_foo_bar = Algo.simplify(intersect_foo_bar_raw, %{})
# The simplify algorithm should discover the contradiction that an atom cannot be
# both :foo and :bar at the same time. (This requires `check_atom_consistency` to be implemented).
# For now, we stub it and test the plumbing.
# Let's test a simpler contradiction that we *have* implemented.
intersect_atom_int_raw = Algo.apply(:intersect, op_intersect, t_atom, t_int)
simplified_atom_int = Algo.simplify(intersect_atom_int_raw, %{})
test("Simplifying 'atom & int' results in false", false_id, simplified_atom_int)
# Test path collapsing
# If we simplify 'atom | int' under the assumption 'is_atom == true', it should become `true`.
simplified_sum_given_atom = Algo.simplify(sum_atom_int, %{Variable.v_is_atom() => true})
test(
"Simplifying 'atom | int' given 'is_atom==true' results in true",
true_id,
simplified_sum_given_atom
)
# If we simplify 'atom | int' under the assumption 'is_atom == false', it should become `t_int`.
simplified_sum_given_not_atom = Algo.simplify(sum_atom_int, %{Variable.v_is_atom() => false})
test(
"Simplifying 'atom | int' given 'is_atom==false' results in 'integer'",
t_int,
simplified_sum_given_not_atom
)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.Algo tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
# Optional: print details of failed tests if needed
end
end
end
defmodule TypeReconstructorTests do
alias Tdd.TypeReconstructor
alias Tdd.Variable
alias Tdd.TypeSpec
defp test(name, expected_spec, assumptions) do
# Normalize both expected and result for a canonical comparison
expected = TypeSpec.normalize(expected_spec)
result = TypeSpec.normalize(TypeReconstructor.spec_from_assumptions(assumptions))
is_ok = expected == result
status = if is_ok, do: "[PASS]", else: "[FAIL]"
IO.puts("#{status} #{name}")
unless is_ok do
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
def run() do
IO.puts("\n--- Running Tdd.TypeReconstructor Tests ---")
Process.put(:test_failures, [])
# --- Section: Basic Flat Reconstructions ---
IO.puts("\n--- Section: Basic Flat Reconstructions ---")
test("is_atom=true -> atom", :atom, %{Variable.v_is_atom() => true})
test("is_atom=false -> ¬atom", {:negation, :atom}, %{Variable.v_is_atom() => false})
test(
"is_atom=true AND value==:foo -> :foo",
{:literal, :foo},
%{Variable.v_is_atom() => true, Variable.v_atom_eq(:foo) => true}
)
test(
"is_atom=true AND value!=:foo -> atom & ¬:foo",
{:intersect, [:atom, {:negation, {:literal, :foo}}]},
%{Variable.v_is_atom() => true, Variable.v_atom_eq(:foo) => false}
)
test(
"is_integer=true AND int==5 -> 5",
{:literal, 5},
%{Variable.v_is_integer() => true, Variable.v_int_eq(5) => true}
)
test(
"is_list=true AND is_empty=true -> []",
{:literal, []},
%{Variable.v_is_list() => true, Variable.v_list_is_empty() => true}
)
# --- Section: Combined Flat Reconstructions ---
IO.puts("\n--- Section: Combined Flat Reconstructions ---")
test(
"int > 10 AND int < 20",
# This is complex. Our simple reconstructor makes two separate ranges.
# A more advanced one would combine them into a single {:integer_range, 11, 19}.
# For now, we test the current behavior.
{:intersect,
[
:integer,
{:integer_range, 11, :pos_inf},
{:integer_range, :neg_inf, 19}
]},
%{Variable.v_int_gt(10) => true, Variable.v_int_lt(20) => true}
)
# --- Section: Recursive Reconstructions (Simplified) ---
IO.puts("\n--- Section: Recursive Reconstructions ---")
# This tests the partitioning and recursive call logic.
# Our simple implementation of recursive cases means we can only test simple things.
test(
"head is an atom",
{:intersect, [:list, {:cons, :atom, :any}]},
# Assumption for `is_list=true` is implied by `v_list_head_pred`
%{Variable.v_list_head_pred(Variable.v_is_atom()) => true}
)
# Note: The recursive tests are limited by the simplifications made in the
# implementation (e.g., tuple reconstruction). A full implementation would
# require more context (like tuple size) to be passed down.
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All TypeReconstructor tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule CompilerAlgoTests do
alias Tdd.Compiler
alias Tdd.Store
alias Tdd.Algo
# High-level helpers that mimic the final API
# defp is_subtype(spec1, spec2) do
# id1 = Compiler.spec_to_id(spec1)
# id2 = Compiler.spec_to_id(spec2)
# # The subtyping check is: `A <: B` if and only if `A & ~B` is empty (`:none`).
# neg_id2 = Algo.negate(id2)
#
# op_intersect = fn
# :false_terminal, _ -> :false_terminal
# _, :false_terminal -> :false_terminal
# t, :true_terminal -> t
# :true_terminal, t -> t
# # Default case for non-terminal nodes, though apply handles recursion
# _t1, _t2 -> :non_terminal
# end
#
# intersect_id = Algo.apply(:intersect, op_intersect, id1, neg_id2)
# final_id = Algo.simplify(intersect_id)
# final_id == Store.false_node_id()
# end
defp are_equivalent(spec1, spec2) do
Compiler.spec_to_id(spec1) == Compiler.spec_to_id(spec2)
end
defp is_contradiction(spec) do
Compiler.spec_to_id(spec) == Store.false_node_id()
end
defp test_subtype(name, expected, s1, s2), do: test(name, expected, Compiler.is_subtype(s1, s2))
defp test_equiv(name, expected, s1, s2), do: test(name, expected, are_equivalent(s1, s2))
defp test_contradiction(name, expected \\ true), do: &test(name, expected, is_contradiction(&1))
defp test(name, exp, res) do
is_ok = exp == res
status = if is_ok, do: "[PASS]", else: "[FAIL]"
IO.puts("#{status} #{name}")
unless is_ok do
IO.puts(" Expected: #{inspect(exp)}")
IO.puts(" Got: #{inspect(res)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
def run() do
IO.puts("\n--- Running Compiler & Algo Integration Tests ---")
Process.put(:test_failures, [])
# Setup
Tdd.Store.init()
# --- Section: Basic Compilation & Equivalence ---
IO.puts("\n--- Section: Basic Equivalences ---")
test_equiv("atom & any == atom", true, {:intersect, [:atom, :any]}, :atom)
test_equiv("atom | none == atom", true, {:union, [:atom, :none]}, :atom)
test_equiv("atom & int == none", true, {:intersect, [:atom, :integer]}, :none)
test_equiv("¬(¬atom) == atom", true, {:negation, {:negation, :atom}}, :atom)
test_equiv("atom | atom == atom", true, {:union, [:atom, :atom]}, :atom)
# --- Section: Basic Subtyping ---
IO.puts("\n--- Section: Basic Subtyping ---")
test_subtype(":foo <: atom", true, {:literal, :foo}, :atom)
test_subtype("atom <: :foo", false, :atom, {:literal, :foo})
test_subtype(":foo <: integer", false, {:literal, :foo}, :integer)
test_subtype("int==5 <: integer", true, {:literal, 5}, :integer)
test_subtype("none <: atom", true, :none, :atom)
test_subtype("atom <: any", true, :atom, :any)
# --- Section: Integer Range Logic ---
IO.puts("\n--- Section: Integer Range Logic ---")
int_5_to_10 = {:integer_range, 5, 10}
int_7_to_8 = {:integer_range, 7, 8}
int_15_to_20 = {:integer_range, 15, 20}
int_0_to_100 = {:integer_range, 0, 100}
test_subtype("range(7..8) <: range(5..10)", true, int_7_to_8, int_5_to_10)
test_subtype("range(5..10) <: range(7..8)", false, int_5_to_10, int_7_to_8)
test_subtype("range(5..10) <: range(15..20)", false, int_5_to_10, int_15_to_20)
test_equiv(
"range(5..10) & range(7..8) == range(7..8)",
true,
{:intersect, [int_5_to_10, int_7_to_8]},
int_7_to_8
)
test_equiv(
"range(5..10) & range(0..100) == range(5..10)",
true,
{:intersect, [int_5_to_10, int_0_to_100]},
int_5_to_10
)
test_equiv(
"range(5..10) | range(7..8) == range(5..10)",
true,
{:union, [int_5_to_10, int_7_to_8]},
int_5_to_10
)
# --- Section: Contradictions & Simplifications ---
IO.puts("\n--- Section: Contradictions & Simplifications ---")
test_contradiction("atom & integer").({:intersect, [:atom, :integer]})
test_contradiction(":foo & :bar").({:intersect, [{:literal, :foo}, {:literal, :bar}]})
test_contradiction("atom & (int==5)").({:intersect, [:atom, {:literal, 5}]})
test_contradiction("range(5..10) & range(15..20)").({:intersect, [int_5_to_10, int_15_to_20]})
test_contradiction("integer & ¬integer").({:intersect, [:integer, {:negation, :integer}]})
# --- Section: Subtype Reduction in Normalization ---
IO.puts("\n--- Section: Subtype Reduction Logic ---")
test_equiv(
"(:foo | :bar | atom) simplifies to atom",
true,
{:union, [{:literal, :foo}, {:literal, :bar}, :atom]},
:atom
)
test_equiv(
"(range(5..10) | integer) simplifies to integer",
true,
{:union, [int_5_to_10, :integer]},
:integer
)
test_equiv(
"(:foo & atom) simplifies to :foo",
true,
{:intersect, [{:literal, :foo}, :atom]},
{:literal, :foo}
)
test_equiv(
"(range(5..10) & integer) simplifies to range(5..10)",
true,
{:intersect, [int_5_to_10, :integer]},
int_5_to_10
)
# --- Section: Logical Laws (Distribution and De Morgan's) ---
IO.puts("\n--- Section: Logical Laws ---")
# De Morgan's Law: ¬(A | B) == (¬A & ¬B)
spec_not_a_or_b = {:negation, {:union, [:atom, :integer]}}
spec_not_a_and_not_b = {:intersect, [{:negation, :atom}, {:negation, :integer}]}
test_equiv(
"De Morgan's (¬(A|B) == ¬A & ¬B) holds",
true,
spec_not_a_or_b,
spec_not_a_and_not_b
)
# De Morgan's Law: ¬(A & B) == (¬A | ¬B)
spec_not_a_and_b = {:negation, {:intersect, [{:literal, :foo}, int_5_to_10]}}
spec_not_a_or_not_b = {:union, [{:negation, {:literal, :foo}}, {:negation, int_5_to_10}]}
test_equiv(
"De Morgan's (¬(A&B) == ¬A | ¬B) holds",
true,
spec_not_a_and_b,
spec_not_a_or_not_b
)
# Distributive Law: A & (B | C) == (A & B) | (A & C)
spec_a = :integer
spec_b = {:integer_range, 0, 10}
spec_c = {:integer_range, 20, 30}
spec_dist_lhs = {:intersect, [spec_a, {:union, [spec_b, spec_c]}]}
spec_dist_rhs = {:union, [{:intersect, [spec_a, spec_b]}, {:intersect, [spec_a, spec_c]}]}
test_equiv(
"Distributive Law (A & (B|C)) holds",
true,
spec_dist_lhs,
spec_dist_rhs
)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Compiler & Algo Integration tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
end
defmodule TddCompilerRecursiveTests do
alias Tdd.Compiler
alias Tdd.Store
alias Tdd.Algo
alias Tdd.TypeSpec
# --- Test Runner ---
def run() do
IO.puts("\n--- Running Tdd.Compiler Recursive Type Tests ---")
Process.put(:test_failures, [])
Tdd.Store.init()
# --- Tests for :cons ---
IO.puts("\n--- Section: :cons ---")
test_subtype(":cons is a subtype of :list", true, {:cons, :atom, :list}, :list)
test_subtype(
":cons is not a subtype of the empty list",
false,
{:cons, :atom, :list},
{:literal, []}
)
test_subtype(
"cons(integer, list) is a subtype of cons(any, any)",
true,
{:cons, :integer, :list},
{:cons, :any, :any}
)
test_subtype(
"cons(any, any) is not a subtype of cons(integer, list)",
false,
{:cons, :any, :any},
{:cons, :integer, :list}
)
# --- Tests for :tuple ---
IO.puts("\n--- Section: :tuple ---")
test_subtype(
"{:tuple, [atom, int]} is a subtype of :tuple",
true,
{:tuple, [:atom, :integer]},
:tuple
)
test_subtype(
"{:tuple, [atom, int]} is not a subtype of :list",
false,
{:tuple, [:atom, :integer]},
:list
)
test_subtype(
"a tuple of size 2 is not a subtype of a tuple of size 3",
false,
{:tuple, [:atom, :integer]},
{:tuple, [:atom, :integer, :list]}
)
spec_specific = {:tuple, [{:literal, :a}, {:literal, 1}]}
spec_general = {:tuple, [:atom, :integer]}
spec_unrelated = {:tuple, [:list, :list]}
test_subtype(
"subtype check works element-wise (specific <: general)",
true,
spec_specific,
spec_general
)
test_subtype(
"subtype check works element-wise (general </: specific)",
false,
spec_general,
spec_specific
)
test_subtype(
"subtype check works element-wise (specific </: unrelated)",
false,
spec_specific,
spec_unrelated
)
# --- Tests for :list_of ---
IO.puts("\n--- Section: :list_of ---")
test_subtype("list_of(E) is a subtype of list", true, {:list_of, :integer}, :list)
test_subtype(
"empty list is a subtype of any list_of(E)",
true,
{:literal, []},
{:list_of, :integer}
)
test_subtype(
"list_of(subtype) is a subtype of list_of(supertype)",
true,
{:list_of, {:literal, 1}},
{:list_of, :integer}
)
# Tdd.Debug.run(fn ->
test_subtype(
"list_of(supertype) is not a subtype of list_of(subtype)",
false,
{:list_of, :integer},
{:list_of, {:literal, 1}}
)
# end)
list_with_int = {:cons, :integer, {:literal, []}}
list_of_atoms = {:list_of, :atom}
test_subtype(
"a list with a wrong element type is not a subtype of list_of(E)",
false,
list_with_int,
list_of_atoms
)
list_with_atom = {:cons, :atom, {:literal, []}}
# Tdd.Debug.run(fn ->
test_subtype(
"a list with a correct element type is a subtype of list_of(E)",
true,
list_with_atom,
list_of_atoms
)
# end)
# --- Equivalence tests ---
IO.puts("\n--- Section: Equivalence ---")
e_spec = :integer
list_of_e = {:list_of, e_spec}
recursive_def = TypeSpec.normalize({:union, [{:literal, []}, {:cons, e_spec, list_of_e}]})
test_equiv(
"the recursive definition holds: list_of(E) == [] | cons(E, list_of(E))",
true,
list_of_e,
recursive_def
)
int_range_1 = {:integer_range, 0, 10}
int_range_2 = {:integer_range, 5, 15}
int_range_intersect = {:integer_range, 5, 10}
list_of_1 = {:list_of, int_range_1}
list_of_2 = {:list_of, int_range_2}
list_of_intersect = {:list_of, int_range_intersect}
combined_spec = {:intersect, [list_of_1, list_of_2]}
test_equiv(
"intersection of two list_of types works correctly",
true,
combined_spec,
list_of_intersect
)
# --- Final Report ---
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.Compiler Recursive Type tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} test failures.")
end
end
# --- Private Test Helpers ---
defp test(name, expected, result) do
if expected == result do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
defp test_equiv(name, expected, spec1, spec2) do
result = Compiler.spec_to_id(spec1) == Compiler.spec_to_id(spec2)
test(name, expected, result)
end
defp test_subtype(name, expected, spec1, spec2) do
result = Compiler.is_subtype(spec1, spec2)
test(name, expected, result)
end
# --- Private Logic Helpers ---
defp do_is_subtype(spec1, spec2) do
id1 = Compiler.spec_to_id(spec1)
id2 = Compiler.spec_to_id(spec2)
# A <: B <=> A & ~B == none
intersect_id = Algo.apply(:intersect, &op_intersect_terminals/2, id1, Algo.negate(id2))
final_id = Algo.simplify(intersect_id)
final_id == Store.false_node_id()
end
defp op_intersect_terminals(u1_details, u2_details) do
case {u1_details, u2_details} do
{:false_terminal, _} -> :false_terminal
{_, :false_terminal} -> :false_terminal
{:true_terminal, t2} -> t2
{t1, :true_terminal} -> t1
end
end
end
# test/tdd/type_spec_advanced_tests.exs
defmodule Tdd.TypeSpecAdvancedTests do
alias Tdd.TypeSpec
# Test helper for regular normalization checks
defp test(name, expected_spec, input_spec) do
current_failures = Process.get(:test_failures, [])
normalized_result = TypeSpec.normalize(input_spec)
is_equal = expected_spec == normalized_result
if is_equal do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Input: #{inspect(input_spec)}")
IO.puts(" Expected: #{inspect(expected_spec)}")
IO.puts(" Got: #{inspect(normalized_result)}")
Process.put(:test_failures, [name | current_failures])
end
end
# Test helper for checking raised errors (same as before)
defp test_raise(name, expected_error_struct, expected_message_regex, input_spec) do
current_failures = Process.get(:test_failures, [])
error_caught =
try do
TypeSpec.normalize(input_spec)
# No error caught
nil
rescue
e ->
if is_struct(e, expected_error_struct) do
e
else
# Catch other errors differently
{:unexpected_error, e}
end
end
cond do
is_nil(error_caught) ->
IO.puts("[FAIL] #{name}")
IO.puts(
" Expected error: #{inspect(expected_error_struct)} matching ~r/#{inspect(expected_message_regex)}/"
)
IO.puts(" Got: No error")
Process.put(:test_failures, [name | current_failures])
match?({:unexpected_error, _}, error_caught) ->
{:unexpected_error, e} = error_caught
IO.puts("[FAIL] #{name} (Unexpected Error Type)")
IO.puts(
" Expected error: #{inspect(expected_error_struct)} matching ~r/#{inspect(expected_message_regex)}/"
)
IO.puts(" Got error: #{inspect(e)}")
Process.put(:test_failures, [name | current_failures])
(is_struct(error_caught, expected_error_struct) or
(is_tuple(error_caught) and elem(error_caught, 0) == expected_error_struct)) and
(is_nil(expected_message_regex) or
String.match?(Exception.message(error_caught), expected_message_regex)) ->
IO.puts("[PASS] #{name}")
true ->
IO.puts("[FAIL] #{name}")
IO.puts(
" Expected error: #{inspect(expected_error_struct)} matching ~r/#{inspect(expected_message_regex)}/"
)
IO.puts(" Got error: #{inspect(error_caught)}")
IO.puts(" Message: #{inspect(Exception.message(error_caught))}")
Process.put(:test_failures, [name | current_failures])
end
end
def run() do
IO.puts("\n--- Running Tdd.TypeSpec Advanced Normalization Tests ---")
Process.put(:test_failures, [])
# --- Section: μ-type (Recursive Type) Normalization ---
IO.puts("\n--- Section: μ-type (Recursive Type) Normalization ---")
test(
"basic alpha-conversion for μ-variable",
{:mu, :m_var0, {:type_var, :m_var0}},
{:mu, :X, {:type_var, :X}}
)
test(
"alpha-conversion with nested μ-variable (depth-first renaming)",
# Body union already sorted by var name
{:mu, :m_var0, {:mu, :m_var1, {:union, [{:type_var, :m_var0}, {:type_var, :m_var1}]}}},
{:mu, :X, {:mu, :Y, {:union, [{:type_var, :X}, {:type_var, :Y}]}}}
)
test(
"alpha-conversion order matters for canonical names (1)",
      # With subtype reduction deferred, both union branches survive pass1 and receive
      # canonical names; the final apply_subtype_reduction pass then sorts the outer union.
      # Expected: m_var0(union(m_var1(m_var0 | m_var1), m_var2(m_var0 | m_var2))).
      # The branches differ only in :m_var1 vs :m_var2, so the :m_var1 branch sorts first.
{:mu, :m_var0,
{:union,
[
# Inner union sorted
{:mu, :m_var1, {:union, [{:type_var, :m_var0}, {:type_var, :m_var1}]}},
# Inner union sorted
{:mu, :m_var2, {:union, [{:type_var, :m_var0}, {:type_var, :m_var2}]}}
]}},
{:mu, :A,
{:union,
[
{:mu, :B, {:union, [{:type_var, :A}, {:type_var, :B}]}},
{:mu, :C, {:union, [{:type_var, :A}, {:type_var, :C}]}}
]}}
)
test(
"body of μ-type is normalized (sorted union, duplicates removed by final pass)",
# apply_subtype_reduction will sort and unique the union members.
# :integer then :type_var
{:mu, :m_var0, {:union, [:integer, {:type_var, :m_var0}]}},
{:mu, :Rec, {:union, [:integer, {:type_var, :Rec}, :integer, :none]}}
)
test(
"list_of(integer) normalizes to a μ-expression with canonical var",
# Expected: {:literal, []} sorts before {:cons, ...}
{:mu, :m_var0, {:union, [{:literal, []}, {:cons, :integer, {:type_var, :m_var0}}]}},
{:list_of, :integer}
)
test(
"list_of(list_of(atom)) normalizes to nested μ-expressions",
{:mu, :m_var0,
{:union,
[
# Sorted first
{:literal, []},
{:cons,
{
:mu,
:m_var1,
# Inner sorted
{:union, [{:literal, []}, {:cons, :atom, {:type_var, :m_var1}}]}
}, {:type_var, :m_var0}}
]}},
{:list_of, {:list_of, :atom}}
)
# --- Section: Λ-type (Type Lambda) Normalization ---
IO.puts("\n--- Section: Λ-type (Type Lambda) Normalization ---")
test(
"basic alpha-conversion for Λ-parameters",
{:type_lambda, [:lambda_var0], {:type_var, :lambda_var0}},
{:type_lambda, [:T], {:type_var, :T}}
)
test(
"multiple Λ-parameters alpha-converted (sorted by canonical name)",
{:type_lambda, [:lambda_var0, :lambda_var1],
{:tuple, [{:type_var, :lambda_var0}, {:type_var, :lambda_var1}]}},
{:type_lambda, [:K, :V], {:tuple, [{:type_var, :K}, {:type_var, :V}]}}
)
test(
"body of Λ-type is normalized",
# :integer then :type_var
{:type_lambda, [:lambda_var0], {:union, [:integer, {:type_var, :lambda_var0}]}},
{:type_lambda, [:T], {:union, [:integer, {:type_var, :T}, :integer, :none]}}
)
test(
"nested lambda alpha-conversion (depth-first renaming)",
{:type_lambda, [:lambda_var0],
{:type_lambda, [:lambda_var1],
{:tuple, [{:type_var, :lambda_var0}, {:type_var, :lambda_var1}]}}},
{:type_lambda, [:X], {:type_lambda, [:Y], {:tuple, [{:type_var, :X}, {:type_var, :Y}]}}}
)
test(
"lambda and mu interaction for canonical names",
# Union body: {:type_var, :lambda_var0} sorts before {:type_var, :m_var0}
{:type_lambda, [:lambda_var0],
{:mu, :m_var0, {:union, [{:type_var, :lambda_var0}, {:type_var, :m_var0}]}}},
{:type_lambda, [:T], {:mu, :X, {:union, [{:type_var, :T}, {:type_var, :X}]}}}
)
test(
"mu and lambda interaction for canonical names",
# Union body: {:type_var, :lambda_var0} sorts before {:type_var, :m_var0}
{:mu, :m_var0,
{:type_lambda, [:lambda_var0], {:union, [{:type_var, :lambda_var0}, {:type_var, :m_var0}]}}},
{:mu, :X, {:type_lambda, [:T], {:union, [{:type_var, :X}, {:type_var, :T}]}}}
)
# --- Section: Type Application Normalization (Beta-Reduction) ---
IO.puts("\n--- Section: Type Application Normalization (Beta-Reduction) ---")
test(
"simple application: (ΛT.T) integer -> integer",
:integer,
{:type_apply, {:type_lambda, [:T], {:type_var, :T}}, [:integer]}
)
test(
"application with structure: (ΛT. list_of(T)) atom -> list_of(atom) (normalized form)",
# Expected: normalized form of list_of(atom)
{:mu, :m_var0, {:union, [{:literal, []}, {:cons, :atom, {:type_var, :m_var0}}]}},
{:type_apply, {:type_lambda, [:T], {:list_of, {:type_var, :T}}}, [:atom]}
)
test(
"application with multiple args: (ΛK,V. {K,V}) atom,integer -> {atom,integer}",
{:tuple, [:atom, :integer]},
{:type_apply, {:type_lambda, [:K, :V], {:tuple, [{:type_var, :K}, {:type_var, :V}]}},
[:atom, :integer]}
)
test(
"application resulting in a mu-type that needs further normalization after substitution",
# Union: :integer then :type_var
{:mu, :m_var0, {:union, [:integer, {:type_var, :m_var0}]}},
{:type_apply, {:type_lambda, [:T], {:mu, :X, {:union, [{:type_var, :T}, {:type_var, :X}]}}},
[:integer]}
)
test_raise(
"arity mismatch in application raises error (too many args)",
RuntimeError,
~r/Arity mismatch/,
{:type_apply, {:type_lambda, [:T], {:type_var, :T}}, [:integer, :atom]}
)
test_raise(
"arity mismatch in application raises error (too few args)",
RuntimeError,
~r/Arity mismatch/,
{:type_apply, {:type_lambda, [:T, :U], {:type_var, :T}}, [:integer]}
)
test(
"application of non-lambda (e.g. type_var) remains unreduced",
{:type_apply, {:type_var, :F}, [:integer]},
{:type_apply, {:type_var, :F}, [:integer]}
)
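    # The free :Y_free in the argument must stay free in the result; it must not be captured
    # (re-bound) by the inner Λ-parameter during substitution and canonical renaming.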
test(
"substitution avoids capture of free variable in argument by inner lambda",
{:type_lambda, [:lambda_var0], {:tuple, [{:type_var, :Y_free}, {:type_var, :lambda_var0}]}},
(
y_free_spec = {:type_var, :Y_free}
inner_lambda =
{:type_lambda, [:Y_bound], {:tuple, [{:type_var, :X}, {:type_var, :Y_bound}]}}
outer_lambda = {:type_lambda, [:X], inner_lambda}
{:type_apply, outer_lambda, [y_free_spec]}
)
)
test(
"application with nested lambdas and mus, complex substitution and renaming",
# Expected: list_of(atom) normalized
{:mu, :m_var0, {:union, [{:literal, []}, {:cons, :atom, {:type_var, :m_var0}}]}},
(
id_lambda = {:type_lambda, [:Tparam], {:type_var, :Tparam}}
list_lambda = {:type_lambda, [:Eparam], {:list_of, {:type_var, :Eparam}}}
id_applied_to_atom = {:type_apply, id_lambda, [:atom]}
{:type_apply, list_lambda, [id_applied_to_atom]}
)
)
# --- Section: Interaction with Union/Intersection ---
IO.puts("\n--- Section: Interaction with Union/Intersection ---")
test(
"union members are normalized with mu and apply (sorted result)",
# :atom sorts before :integer
{:union, [:atom, :integer]},
{:union, [:integer, {:type_apply, {:type_lambda, [:T], {:type_var, :T}}, [:atom]}]}
)
# list_of_int_normalized will be {:mu, :m_var0, {:union, [{:literal, []}, {:cons, :integer, {:type_var, :m_var0}}]}}
list_of_int_normalized = TypeSpec.normalize({:list_of, :integer})
test(
"list_of inside a union gets normalized and union sorted",
# :atom sorts before {:mu, ...}
{:union, [:atom, list_of_int_normalized]},
{:union, [:atom, {:list_of, :integer}]}
)
test(
"Normalization of type_apply within intersection, then intersection simplification",
:integer,
{:intersect, [:any, {:type_apply, {:type_lambda, [:T], {:type_var, :T}}, [:integer]}]}
)
# Final Report
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.TypeSpec Advanced Normalization tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} Tdd.TypeSpec Advanced Normalization test failures:")
Enum.each(Enum.reverse(failures), &IO.puts(" - #{&1}"))
end
length(failures) == 0
end
end
defmodule Tdd.CompilerAdvancedTests do
alias Tdd.Compiler
alias Tdd.Store
# For constructing specs
alias Tdd.TypeSpec
# Helper to run tests
defp test(name, expected, actual_fun_call_result) do
# For subtyping, expected is boolean. For equivalence, compare TDD IDs.
if expected == actual_fun_call_result do
IO.puts("[PASS] #{name}")
else
IO.puts("[FAIL] #{name}")
IO.puts(" Expected: #{inspect(expected)}")
IO.puts(" Got: #{inspect(actual_fun_call_result)}")
Process.put(:test_failures, [name | Process.get(:test_failures, [])])
end
end
defp test_subtype(name, expected_bool, spec1, spec2) do
test(name, expected_bool, Tdd.Compiler.is_subtype(spec1, spec2))
end
defp test_equivalent_tdd(name, spec1, spec2) do
id1 = Tdd.Compiler.spec_to_id(spec1)
id2 = Tdd.Compiler.spec_to_id(spec2)
# Test that their TDD IDs are the same
test(name, id1, id2)
end
defp test_compiles_to_none(name, spec) do
id = Tdd.Compiler.spec_to_id(spec)
test(name, Tdd.Store.false_node_id(), id)
end
defp test_compiles_to_any(name, spec) do
id = Tdd.Compiler.spec_to_id(spec)
test(name, Tdd.Store.true_node_id(), id)
end
defp test_raise_compile(name, expected_error_struct, expected_message_regex, input_spec) do
current_failures = Process.get(:test_failures, [])
error_caught =
try do
Tdd.Compiler.spec_to_id(input_spec)
# No error
nil
rescue
e ->
          # Accept either the %RuntimeError{} exception struct or, defensively,
          # a {RuntimeError, _} tuple form.
if is_struct(e, expected_error_struct) or
(is_tuple(e) and elem(e, 0) == expected_error_struct and
expected_error_struct == RuntimeError) do
e
else
{:unexpected_error, e}
end
end
cond do
is_nil(error_caught) ->
IO.puts("[FAIL] #{name}")
IO.puts(
" Expected error: #{inspect(expected_error_struct)} matching ~r/#{inspect(expected_message_regex)}/"
)
IO.puts(" Got: No error")
Process.put(:test_failures, [name | current_failures])
match?({:unexpected_error, _}, error_caught) ->
{:unexpected_error, e} = error_caught
IO.puts("[FAIL] #{name} (Unexpected Error Type)")
IO.puts(
" Expected error: #{inspect(expected_error_struct)} matching ~r/#{inspect(expected_message_regex)}/"
)
IO.puts(" Got error: #{inspect(e)}")
Process.put(:test_failures, [name | current_failures])
(is_struct(error_caught, expected_error_struct) or
(is_tuple(error_caught) and elem(error_caught, 0) == expected_error_struct and
expected_error_struct == RuntimeError)) and
(is_nil(expected_message_regex) or
String.match?(Exception.message(error_caught), expected_message_regex)) ->
IO.puts("[PASS] #{name}")
true ->
IO.puts("[FAIL] #{name}")
IO.puts(
" Expected error: #{inspect(expected_error_struct)} matching ~r/#{inspect(expected_message_regex)}/"
)
IO.puts(" Got error: #{inspect(error_caught)}")
IO.puts(" Message: #{inspect(Exception.message(error_caught))}")
Process.put(:test_failures, [name | current_failures])
end
end
def run() do
IO.puts("\n--- Running Tdd.Compiler Advanced Feature Tests (μ, Λ, Apply) ---")
Process.put(:test_failures, [])
    # Ensure the store is fresh for this module's run, in case the test modules are executed separately.
Tdd.Store.init()
# --- Section: Basic μ-type compilation (via list_of normalization) ---
IO.puts("\n--- Section: Basic μ-type (list_of) ---")
int_list = {:list_of, :integer}
any_list = {:list_of, :any}
atom_list = {:list_of, :atom}
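    # The next three checks exercise covariance of list_of in its element type:
    # E1 <: E2 should imply list_of(E1) <: list_of(E2), but not the converse.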
test_subtype("list_of(integer) <: list_of(any)", true, int_list, any_list)
test_subtype("list_of(any) <: list_of(integer)", false, any_list, int_list)
test_subtype("list_of(integer) <: list_of(atom)", false, int_list, atom_list)
# Test equivalence of list_of(E) and its explicit μ unfolding
# TypeSpec.normalize will convert list_of to mu. So this tests mu compilation.
# This is already {:mu, :m_var0, ...}
list_of_int_norm = TypeSpec.normalize(int_list)
test_equivalent_tdd(
"list_of(int) TDD is equivalent to its normalized mu-form TDD",
int_list,
list_of_int_norm
)
test_subtype("[] <: list_of(integer)", true, {:literal, []}, int_list)
# Tdd.Debug.run(fn ->
test_subtype(
"cons(1, []) <: list_of(integer)",
true,
{:cons, {:literal, 1}, {:literal, []}},
int_list
)
# end)
test_subtype(
"cons(:a, []) !<: list_of(integer)",
false,
{:cons, {:literal, :a}, {:literal, []}},
int_list
)
test_subtype(
"cons(1, cons(:a, [])) !<: list_of(integer)",
false,
{:cons, {:literal, 1}, {:cons, {:literal, :a}, {:literal, []}}},
int_list
)
# --- Section: Explicit μ-types ---
IO.puts("\n--- Section: Explicit μ-types ---")
# Binary Tree of Atoms: Tree = μX. (Empty | Node<atom, X, X>)
# Using :empty_tree for the literal leaf for clarity.
leaf_node = {:literal, :empty_tree}
tree_spec =
{:mu, :Tree,
{:union,
[
leaf_node,
{:tuple, [:atom, {:type_var, :Tree}, {:type_var, :Tree}]}
]}}
# Check it compiles without error first
    tree_tdd_id = Tdd.Compiler.spec_to_id(tree_spec)
    test("Explicit mu-type (atom tree) compiles", true, is_integer(tree_tdd_id))
# An instance of the tree
simple_tree_instance = {:tuple, [{:literal, :a}, leaf_node, leaf_node]}
test_subtype("Simple atom tree instance <: AtomTree", true, simple_tree_instance, tree_spec)
# Integer instead of atom
non_tree_instance = {:tuple, [{:literal, 123}, leaf_node, leaf_node]}
test_subtype("Non-atom tree instance </: AtomTree", false, non_tree_instance, tree_spec)
    # Mutual recursion (e.g. Even = 0 | Succ<Odd>, Odd = Succ<Even>) is not exercised
    # directly here; a sketch of one possible encoding follows the stream tests below.
    # Use a simpler self-recursive form instead: Stream<T> = μS. cons(T, S)
int_stream_spec = {:mu, :S, {:cons, :integer, {:type_var, :S}}}
    int_stream_id = Tdd.Compiler.spec_to_id(int_stream_spec)
    test("μ-type for int stream compiles", true, is_integer(int_stream_id))
test_subtype(
"cons(1, cons(2, Stream)) <: Stream",
true,
{:cons, {:literal, 1}, {:cons, {:literal, 2}, int_stream_spec}},
int_stream_spec
)
test_subtype(
"cons(:a, Stream) </: Stream",
false,
{:cons, {:literal, :a}, int_stream_spec},
int_stream_spec
)
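    # Sketch only (assumed encoding, not asserted on): the mutually recursive Even/Odd pair
    # mentioned above could be folded into a single nested μ by inlining Odd into Even.
    # The :succ tuple tagging here is purely illustrative, not a convention this file defines.
    _even_nat_sketch =
      {:mu, :Even,
       {:union,
        [
          {:literal, 0},
          {:tuple,
           [{:literal, :succ}, {:mu, :Odd, {:tuple, [{:literal, :succ}, {:type_var, :Even}]}}]}
        ]}}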
# Equivalence: μX.T vs μY.T[Y/X] (alpha equivalence handled by normalize, TDDs should match)
mu_x_int = {:mu, :X, {:union, [{:literal, 0}, {:cons, :integer, {:type_var, :X}}]}}
mu_y_int = {:mu, :Y, {:union, [{:literal, 0}, {:cons, :integer, {:type_var, :Y}}]}}
test_equivalent_tdd("Alpha-equivalent μ-types have same TDD", mu_x_int, mu_y_int)
# --- Section: Polymorphism (Λ, Apply) ---
IO.puts("\n--- Section: Polymorphism (Λ, Apply) ---")
# Types that should be fully resolved by TypeSpec.normalize before TDD compilation
# Generic List Constructor: GenList = ΛT. μL. ([] | cons(T, L))
# This is effectively what list_of(T) becomes after normalization.
gen_list_lambda = {:type_lambda, [:Tparam], {:list_of, {:type_var, :Tparam}}}
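    # For reference (a sketch; the assertions below use the :list_of sugar): the fully explicit
    # Λ/μ spelling of GenList, without :list_of, would be
    _explicit_gen_list_sketch =
      {:type_lambda, [:Tparam],
       {:mu, :L, {:union, [{:literal, []}, {:cons, {:type_var, :Tparam}, {:type_var, :L}}]}}}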
# Apply to integer: GenList<integer>
# This should normalize to the same spec as list_of(integer)
list_of_int_from_apply = {:type_apply, gen_list_lambda, [:integer]}
test_equivalent_tdd(
"(ΛT. list_of(T))<integer> TDD == list_of(integer) TDD",
list_of_int_from_apply,
# which is {:list_of, :integer}
int_list
)
# Apply to atom: GenList<atom>
list_of_atom_from_apply = {:type_apply, gen_list_lambda, [:atom]}
test_equivalent_tdd(
"(ΛT. list_of(T))<atom> TDD == list_of(atom) TDD",
list_of_atom_from_apply,
# which is {:list_of, :atom}
atom_list
)
# Test subtyping between instantiated types
test_subtype(
"(ΛT. list_of(T))<integer> <: (ΛT. list_of(T))<any>",
true,
list_of_int_from_apply,
{:type_apply, gen_list_lambda, [:any]}
)
# Identity function: ID = ΛT. T
id_lambda = {:type_lambda, [:T], {:type_var, :T}}
# Normalizes to :atom
id_applied_to_atom = {:type_apply, id_lambda, [:atom]}
test_equivalent_tdd("(ΛT.T)<atom> TDD == :atom TDD", id_applied_to_atom, :atom)
# --- Section: Error Handling in Compiler ---
IO.puts("\n--- Section: Error Handling in Compiler ---")
test_raise_compile(
"Compiling a raw :type_lambda errors",
RuntimeError,
~r/Cannot compile :type_lambda directly/,
# This is already normalized
{:type_lambda, [:T], {:type_var, :T}}
)
# TypeSpec.normalize will leave this as is if F is free.
test_raise_compile(
"Compiling an unreduced :type_apply (with free constructor var) errors",
RuntimeError,
~r/Cannot compile :type_apply directly/,
{:type_apply, {:type_var, :F}, [:integer]}
)
# An unbound type variable not bound by any mu
# Normalization will keep {:type_var, :Unbound} as is if it's free.
# The error should come from `compile_normalized_spec` for the `{:type_var, name}` case.
test_raise_compile(
"Compiling an unbound :type_var errors",
RuntimeError,
~r/Unbound type variable/,
{:type_var, :SomeUnboundVar}
)
# A :type_var that is bound by a mu, but the mu is inside a lambda that isn't applied.
# e.g. ΛT. (μX. X) -- this is fine to normalize. Compiling it is an error.
spec_lambda_mu_var = {:type_lambda, [:T], {:mu, :X, {:type_var, :X}}}
test_raise_compile(
"Compiling lambda containing mu (still a lambda) errors",
RuntimeError,
~r/Cannot compile :type_lambda directly/,
spec_lambda_mu_var
)
# Final Report
failures = Process.get(:test_failures, [])
if failures == [] do
IO.puts("\n✅ All Tdd.Compiler Advanced Feature tests passed!")
else
IO.puts("\n❌ Found #{length(failures)} Tdd.Compiler Advanced Feature test failures:")
Enum.each(Enum.reverse(failures), &IO.puts(" - #{&1}"))
end
length(failures) == 0
end
end
Process.sleep(100)
TypeSpecTests.run()
TddStoreTests.run()
TddVariableTests.run()
TddAlgoTests.run()
ConsistencyEngineTests.run()
TypeReconstructorTests.run()
CompilerAlgoTests.run()
# TddCompilerRecursiveTests.run()
Tdd.TypeSpecAdvancedTests.run()
Tdd.CompilerAdvancedTests.run()
# Tdd.Compiler.is_subtype(
# {:list_of, {:literal, 1}},
# {:list_of, :integer}
# )
# |> IO.inspect(label: "TEST RESULT")
#
# Tdd.Compiler.spec_to_id( {:list_of, {:literal, 1}})
# |> IO.inspect(label: "list of 1 ")
# |> Tdd.Debug.print_tdd_graph()
#
#
# Tdd.Compiler.spec_to_id(
# {:list_of, :integer}
# )
# |> IO.inspect(label: "list of int ")
# |> Tdd.Debug.print_tdd_graph()