checkpoint
This commit is contained in:
parent
a5e5127bcd
commit
130d369734
775
new.exs
775
new.exs
@ -615,175 +615,243 @@ end
|
||||
# in a new file, e.g., lib/tdd/consistency/engine.ex
|
||||
defmodule Tdd.Consistency.Engine do
|
||||
@moduledoc """
|
||||
A rule-based engine for checking the semantic consistency of a set of assumptions.
|
||||
Placeholders: The check_atom_consistency and check_integer_consistency functions are placeholders. They would need to be filled in with the logic for checking unique values and valid ranges, respectively.
|
||||
A rule-based engine for checking the semantic consistency of a set of assumptions.
|
||||
|
||||
The Circular Dependency: The check_recursive_consistency function highlights the deepest challenge. To properly check if a head's assumed type is compatible with a list_of(X) constraint, the consistency engine needs to ask the TDD compiler: "What is the TDD for the type implied by these head assumptions?" and "What is the TDD for X?", and then call is_subtype. This creates a cycle: Algo.simplify -> Engine.check -> Compiler.spec_to_id -> Algo.simplify.
|
||||
This engine is the "oracle" for the `Tdd.Algo.simplify/2` function. It takes
|
||||
a set of assumptions about predicate variables (e.g., `{is_atom, true}`,
|
||||
`{value == :foo, true}`) and determines if that set is logically consistent.
|
||||
|
||||
Breaking the Cycle: The standard way to break this cycle is to pass a "compiler handle" or context down through the calls. Engine.check wouldn't call the top-level compiler, but a recursive version that knows it's already inside a compilation. For now, we will leave this logic incomplete, as fully implementing it is a major task. The key is that we have isolated where this hard problem lives.
|
||||
The process involves two main steps:
|
||||
1. **Expansion**: The initial assumptions are expanded with all their logical
|
||||
implications until a fixed point is reached. For example, `{value == :foo, true}`
|
||||
implies `{is_atom, true}`.
|
||||
2. **Flat Check**: The fully expanded set of assumptions is checked against a
|
||||
series of rules for contradictions (e.g., a value cannot be both an atom
|
||||
and an integer).
|
||||
|
||||
The difficult problem of recursive consistency (checking sub-problems like a
|
||||
list's head against an ambient constraint) is currently disabled, as it creates
|
||||
a logical cycle with the TDD compiler.
|
||||
"""
|
||||
|
||||
alias Tdd.Predicate.Info
|
||||
alias Tdd.TypeReconstructor
|
||||
alias Tdd.Compiler
|
||||
alias Tdd.Variable
|
||||
|
||||
@doc "Checks if a map of assumptions is logically consistent."
|
||||
@doc """
|
||||
Checks if a map of assumptions is logically consistent.
|
||||
|
||||
Returns `:consistent` or `:contradiction`.
|
||||
"""
|
||||
@spec check(map()) :: :consistent | :contradiction
|
||||
def check(assumptions) do
|
||||
with {:ok, expanded} <- expand_with_implications(assumptions),
|
||||
:ok <- check_flat_consistency(expanded),
|
||||
:ok <- check_recursive_consistency(expanded) do
|
||||
:ok <- check_flat_consistency(expanded) do
|
||||
# --- RECURSIVE CHECK DISABLED ---
|
||||
# The call to `check_recursive_consistency/2` is disabled because it
|
||||
# introduces a logical cycle:
|
||||
# simplify -> check -> spec_to_id -> simplify
|
||||
# Solving this requires a more advanced architecture, such as passing a
|
||||
# compiler context/handle to break the recursion. For now, we rely
|
||||
# on the power of the main `simplify` algorithm to handle these recursive
|
||||
# constraints by construction, and we focus on robust flat checks.
|
||||
# :ok <- check_recursive_consistency(expanded, assumptions)
|
||||
:consistent
|
||||
else
|
||||
:error -> :contradiction
|
||||
end
|
||||
end
|
||||
|
||||
# Expands the assumption set by recursively adding all implied truths.
|
||||
defp expand_with_implications(assumptions, new_implications \\ true) do
|
||||
if new_implications == false do
|
||||
{:ok, assumptions}
|
||||
else
|
||||
{next_assumptions, added_new} =
|
||||
Enum.reduce(assumptions, {assumptions, false}, fn
|
||||
{var, true}, {acc, changed} ->
|
||||
case Info.get_traits(var) do
|
||||
%{implies: implies_list} ->
|
||||
Enum.reduce(implies_list, {acc, changed}, fn {implied_var, implied_val},
|
||||
{inner_acc, inner_changed} ->
|
||||
case Map.get(inner_acc, implied_var) do
|
||||
nil -> {Map.put(inner_acc, implied_var, implied_val), true}
|
||||
^implied_val -> {inner_acc, inner_changed}
|
||||
# Mark for immediate failure
|
||||
_ -> {Map.put(inner_acc, :__contradiction__, true), true}
|
||||
end
|
||||
end)
|
||||
# --- Step 1: Implication Expansion ---
|
||||
|
||||
_ ->
|
||||
{acc, changed}
|
||||
end
|
||||
@doc "Expands an assumption map with all its logical implications."
|
||||
defp expand_with_implications(assumptions) do
|
||||
# Start with the initial set of assumptions and expand until a fixed point is reached.
|
||||
expand_loop(assumptions, assumptions)
|
||||
end
|
||||
|
||||
_other_assumption, acc ->
|
||||
acc
|
||||
end)
|
||||
defp expand_loop(new_assumptions, all_assumptions) do
|
||||
# For each of the newly added assumptions, find its implications.
|
||||
implications =
|
||||
Enum.flat_map(new_assumptions, fn
|
||||
# An assumption `var == true` may have implications.
|
||||
{var, true} -> Map.get(Info.get_traits(var) || %{}, :implies, [])
|
||||
# An assumption `var == false` has no simple implications in our current model.
|
||||
_ -> []
|
||||
end)
|
||||
|
||||
if Map.has_key?(next_assumptions, :__contradiction__) do
|
||||
# Attempt to merge the new implications into the set of all assumptions.
|
||||
case Enum.reduce(implications, {:ok, %{}}, fn {implied_var, implied_val},
|
||||
acc ->
|
||||
reduce_implication({implied_var, implied_val}, all_assumptions, acc)
|
||||
end) do
|
||||
{:error, :contradiction} ->
|
||||
# A direct contradiction was found during expansion.
|
||||
:error
|
||||
else
|
||||
expand_with_implications(next_assumptions, added_new)
|
||||
end
|
||||
|
||||
{:ok, newly_added} when map_size(newly_added) == 0 ->
|
||||
# Fixed point reached: no new, non-conflicting implications were found.
|
||||
{:ok, all_assumptions}
|
||||
|
||||
{:ok, newly_added} ->
|
||||
# Recursively expand with the newly found assumptions.
|
||||
expand_loop(newly_added, Map.merge(all_assumptions, newly_added))
|
||||
end
|
||||
end
|
||||
|
||||
# Checks for contradictions on a "flat" set of properties for a single entity.
|
||||
defp check_flat_consistency(assumptions) do
|
||||
# Group assumptions by category defined in Predicate.Info
|
||||
by_category =
|
||||
Enum.group_by(assumptions, fn {var, _val} ->
|
||||
case Info.get_traits(var) do
|
||||
%{category: cat} -> cat
|
||||
_ -> :unknown
|
||||
end
|
||||
end)
|
||||
# Helper for the implication reducer.
|
||||
defp reduce_implication({var, val}, all_assumptions, {:ok, new_acc}) do
|
||||
case Map.get(all_assumptions, var) do
|
||||
nil ->
|
||||
# This is a new piece of information. Add it to the set of newly_added things.
|
||||
{:ok, Map.put(new_acc, var, val)}
|
||||
|
||||
# Chain together all the individual checks
|
||||
# Add tuple, etc. checks here
|
||||
with :ok <-
|
||||
check_primary_exclusivity(
|
||||
Map.get(by_category, :atom, []),
|
||||
Map.get(by_category, :integer, []),
|
||||
Map.get(by_category, :list, []),
|
||||
Map.get(by_category, :tuple, [])
|
||||
),
|
||||
:ok <- check_atom_consistency(Map.get(by_category, :atom, [])),
|
||||
:ok <- check_integer_consistency(Map.get(by_category, :integer, [])),
|
||||
:ok <- check_list_flat_consistency(Map.get(by_category, :list, [])) do
|
||||
^val ->
|
||||
# We already knew this. Continue without adding.
|
||||
{:ok, new_acc}
|
||||
|
||||
_other_val ->
|
||||
# Contradiction! The implication conflicts with an existing assumption.
|
||||
{:error, :contradiction}
|
||||
end
|
||||
end
|
||||
|
||||
defp reduce_implication(_implication, _all, error, _), do: error
|
||||
|
||||
# --- Step 2: Flat Consistency Checks ---
|
||||
|
||||
defp check_flat_consistency(assumptions) do
|
||||
with :ok <- check_primary_type_exclusivity(assumptions),
|
||||
:ok <- check_atom_consistency(assumptions),
|
||||
:ok <- check_list_consistency(assumptions),
|
||||
:ok <- check_integer_consistency(assumptions) do
|
||||
:ok
|
||||
else
|
||||
:error -> :error
|
||||
end
|
||||
end
|
||||
|
||||
# Checks that at most one primary type is true.
|
||||
defp check_primary_exclusivity(atom_asm, int_asm, list_asm, tuple_asm) do
|
||||
# Simplified: count how many primary types are constrained to be true
|
||||
true_primary_types =
|
||||
[atom_asm, int_asm, list_asm, tuple_asm]
|
||||
|> Enum.count(fn assumptions_list ->
|
||||
# A primary type is true if its main var is explicitly true,
|
||||
# or if any of its property vars are true.
|
||||
Enum.any?(assumptions_list, fn {_, val} -> val == true end)
|
||||
end)
|
||||
defp check_primary_type_exclusivity(assumptions) do
|
||||
primary_types = [
|
||||
Variable.v_is_atom(),
|
||||
Variable.v_is_integer(),
|
||||
Variable.v_is_list(),
|
||||
Variable.v_is_tuple()
|
||||
]
|
||||
|
||||
true_primary_types = Enum.count(primary_types, &(Map.get(assumptions, &1) == true))
|
||||
|
||||
if true_primary_types > 1, do: :error, else: :ok
|
||||
end
|
||||
|
||||
# Placeholder for atom-specific checks (e.g., cannot be :foo and :bar)
|
||||
defp check_atom_consistency(_assumptions), do: :ok
|
||||
|
||||
# Placeholder for integer-specific checks (e.g., x < 5 and x > 10)
|
||||
defp check_integer_consistency(_assumptions), do: :ok
|
||||
|
||||
# Checks for flat contradictions on a list, e.g. is_empty and has_head.
|
||||
defp check_list_flat_consistency(assumptions) do
|
||||
is_empty_true =
|
||||
Enum.any?(assumptions, fn {var, val} ->
|
||||
val == true and Info.get_traits(var)[:type] == :list_prop
|
||||
defp check_atom_consistency(assumptions) do
|
||||
true_atom_values =
|
||||
Enum.reduce(assumptions, MapSet.new(), fn
|
||||
{{1, :value, atom_val, _}, true}, acc -> MapSet.put(acc, atom_val)
|
||||
_, acc -> acc
|
||||
end)
|
||||
|
||||
has_head_or_tail_prop =
|
||||
Enum.any?(assumptions, fn {var, val} ->
|
||||
val == true and Info.get_traits(var)[:type] == :list_recursive
|
||||
end)
|
||||
|
||||
if is_empty_true and has_head_or_tail_prop, do: :error, else: :ok
|
||||
if MapSet.size(true_atom_values) > 1, do: :error, else: :ok
|
||||
end
|
||||
|
||||
# Handles recursive checks for structured types.
|
||||
defp check_recursive_consistency(assumptions) do
|
||||
# This is still a complex piece of logic, but it's now more structured.
|
||||
# Partition assumptions into sub-problems (head, tail, elements)
|
||||
sub_problems =
|
||||
Enum.reduce(assumptions, %{}, fn {var, val}, acc ->
|
||||
case Info.get_traits(var) do
|
||||
%{type: :list_recursive, sub_key: key} ->
|
||||
# var is e.g. {5, :c_head, {0, :is_atom, nil, nil}, nil}
|
||||
# we want to create a sub-problem for :head with assumption {{0, :is_atom, nil, nil} => val}
|
||||
{_cat, _pred, nested_var, _pad} = var
|
||||
Map.update(acc, key, [{nested_var, val}], &[{nested_var, val} | &1])
|
||||
defp check_list_consistency(assumptions) do
|
||||
# This check is actually redundant if `expand_with_implications` works correctly,
|
||||
# as the `implies` rules for head/tail would create an explicit contradiction
|
||||
# with `is_empty == true`. However, it serves as a good safeguard.
|
||||
is_empty = Map.get(assumptions, Variable.v_list_is_empty()) == true
|
||||
|
||||
%{type: :tuple_recursive, sub_key: key} ->
|
||||
# Similar logic for tuples
|
||||
{_cat, _pred, _index, nested_var} = var
|
||||
Map.update(acc, key, [{nested_var, val}], &[{nested_var, val} | &1])
|
||||
has_head_prop = Enum.any?(assumptions, &match?({{5, :c_head, _, _}, true}, &1))
|
||||
has_tail_prop = Enum.any?(assumptions, &match?({{5, :d_tail, _, _}, true}, &1))
|
||||
|
||||
_ ->
|
||||
acc
|
||||
if is_empty and (has_head_prop or has_tail_prop), do: :error, else: :ok
|
||||
end
|
||||
|
||||
defp check_integer_consistency(assumptions) do
|
||||
# Reduce all integer-related assumptions to a single valid range [min, max].
|
||||
# If the range becomes invalid (min > max), it's a contradiction.
|
||||
initial_range = {:neg_inf, :pos_inf}
|
||||
|
||||
result =
|
||||
Enum.reduce_while(assumptions, initial_range, fn assumption, {min, max} ->
|
||||
case assumption do
|
||||
# x < N => max becomes min(max, N-1)
|
||||
{{2, :alt, n, _}, true} -> narrow_range(min, :erlang.min(max, n - 1))
|
||||
# not (x < N) <=> x >= N => min becomes max(min, N)
|
||||
{{2, :alt, n, _}, false} -> narrow_range(:erlang.max(min, n), max)
|
||||
# x == N => min becomes max(min, N), max becomes min(max, N)
|
||||
{{2, :beq, n, _}, true} -> narrow_range(:erlang.max(min, n), :erlang.min(max, n))
|
||||
# x != N : this is only a contradiction if the range is exactly {N, N}
|
||||
{{2, :beq, n, _}, false} when min == n and max == n -> {:halt, :invalid}
|
||||
# x > N => min becomes max(min, N+1)
|
||||
{{2, :cgt, n, _}, true} -> narrow_range(:erlang.max(min, n + 1), max)
|
||||
# not (x > N) <=> x <= N => max becomes min(max, N)
|
||||
{{2, :cgt, n, _}, false} -> narrow_range(min, :erlang.min(max, n))
|
||||
# Not an integer assumption, continue.
|
||||
_ -> {:cont, {min, max}}
|
||||
end
|
||||
end)
|
||||
|
||||
# Get ambient constraints (e.g., from `list_of(X)`)
|
||||
case result, do: (:invalid -> :error; _ -> :ok)
|
||||
end
|
||||
|
||||
# Helper to check if a new range is valid and continue/halt the reduction.
|
||||
defp narrow_range(min, max) do
|
||||
cond do
|
||||
min == :neg_inf or max == :pos_inf -> {:cont, {min, max}}
|
||||
min > max -> {:halt, :invalid}
|
||||
true -> {:cont, {min, max}}
|
||||
end
|
||||
end
|
||||
|
||||
# --- Step 3: Recursive Consistency (Disabled but preserved) ---
|
||||
defp check_recursive_consistency(assumptions, full_context) do
|
||||
# 1. Gather all ambient constraints from the parent context.
|
||||
ambient_constraints =
|
||||
Enum.reduce(assumptions, %{}, fn {var, true}, acc ->
|
||||
case Info.get_traits(var) do
|
||||
%{type: :list_recursive_ambient, ambient_constraint_spec: spec} ->
|
||||
# This part is tricky. How do we enforce this?
|
||||
# We need to know that the sub-problem's type is a subtype of this spec.
|
||||
# This requires the TDD compiler. This is the main circular dependency.
|
||||
# simplified for now
|
||||
Map.put(acc, :head, spec)
|
||||
Enum.reduce(full_context, %{}, fn
|
||||
# This now correctly handles all cases, not just `{var, true}`.
|
||||
{var, true}, acc ->
|
||||
case Info.get_traits(var) do
|
||||
%{type: :list_recursive_ambient, ambient_constraint_spec: spec} ->
|
||||
Map.merge(acc, %{head: spec, tail: spec})
|
||||
_ -> acc
|
||||
end
|
||||
|
||||
_ ->
|
||||
acc
|
||||
end
|
||||
_, acc ->
|
||||
acc
|
||||
end)
|
||||
|
||||
# Recursively check each sub-problem
|
||||
Enum.reduce_while(sub_problems, :ok, fn {sub_key, sub_assumptions_list}, _acc ->
|
||||
sub_assumptions_map = Map.new(sub_assumptions_list)
|
||||
# Here we would also need to check against ambient constraints.
|
||||
# e.g. is_subtype(build_type_from(sub_assumptions_map), ambient_constraints[sub_key])
|
||||
# This logic remains complex.
|
||||
# 2. Partition assumptions into sub-problems (head, tail, tuple elements).
|
||||
sub_problems =
|
||||
Enum.group_by(assumptions, &Info.get_traits(elem(&1, 0))[:sub_key])
|
||||
|> Map.drop([nil])
|
||||
|
||||
case check(sub_assumptions_map) do
|
||||
# 3. Check each sub-problem against its ambient constraint.
|
||||
Enum.reduce_while(sub_problems, :ok, fn {sub_key, sub_assumptions_list}, _acc ->
|
||||
ambient_spec = Map.get(ambient_constraints, sub_key)
|
||||
|
||||
# Re-map nested vars to base form for reconstruction
|
||||
remapped_assumptions =
|
||||
Map.new(sub_assumptions_list, fn {var, val} ->
|
||||
# Simplified pattern match to extract the inner variable
|
||||
{_cat, _pred, _idx, nested_var} = var
|
||||
{nested_var, val}
|
||||
end)
|
||||
|
||||
reconstructed_spec = TypeReconstructor.spec_from_assumptions(remapped_assumptions)
|
||||
|
||||
# Compile both specs to TDDs and check for subtyping.
|
||||
# THIS IS THE SOURCE OF THE LOGICAL CYCLE
|
||||
reconstructed_id = Compiler.spec_to_id(reconstructed_spec)
|
||||
ambient_id = Compiler.spec_to_id(ambient_spec)
|
||||
|
||||
# if Tdd.is_subtype(reconstructed_id, ambient_id) do
|
||||
# ...
|
||||
# else
|
||||
# {:halt, :error}
|
||||
# end
|
||||
# For now, we assume it's okay to proceed.
|
||||
case check(remapped_assumptions) do
|
||||
:consistent -> {:cont, :ok}
|
||||
:contradiction -> {:halt, :error}
|
||||
end
|
||||
@ -791,196 +859,121 @@ defmodule Tdd.Consistency.Engine do
|
||||
end
|
||||
end
|
||||
|
||||
# in a new file, e.g., lib/tdd/algo.ex
|
||||
# defmodule Tdd.Algo do
|
||||
# @moduledoc "Implements the core, stateless algorithms for TDD manipulation."
|
||||
# alias Tdd.Store
|
||||
# alias Tdd.Consistency.Engine
|
||||
#
|
||||
# # --- Binary Operation: Apply ---
|
||||
# @spec apply(atom, (atom, atom -> atom), non_neg_integer, non_neg_integer) :: non_neg_integer
|
||||
# def apply(op_name, op_lambda, u1_id, u2_id) do
|
||||
# cache_key = {op_name, Enum.sort([u1_id, u2_id])}
|
||||
#
|
||||
# case Store.get_op_cache(cache_key) do
|
||||
# {:ok, result_id} ->
|
||||
# result_id
|
||||
#
|
||||
# :not_found ->
|
||||
# result_id = do_apply(op_name, op_lambda, u1_id, u2_id)
|
||||
# Store.put_op_cache(cache_key, result_id)
|
||||
# result_id
|
||||
# end
|
||||
# end
|
||||
#
|
||||
# defp do_apply(op_name, op_lambda, u1_id, u2_id) do
|
||||
# # This is the classic Shannon Expansion algorithm (ITE - If-Then-Else)
|
||||
# with {:ok, u1_details} <- Store.get_node(u1_id),
|
||||
# {:ok, u2_details} <- Store.get_node(u2_id) do
|
||||
# cond do
|
||||
# # Terminal cases
|
||||
# (u1_details == :true_terminal or u1_details == :false_terminal) and
|
||||
# (u2_details == :true_terminal or u2_details == :false_terminal) ->
|
||||
# res_terminal_symbol = op_lambda.(u1_details, u2_details)
|
||||
#
|
||||
# if res_terminal_symbol == :true_terminal,
|
||||
# do: Store.true_node_id(),
|
||||
# else: Store.false_node_id()
|
||||
#
|
||||
# # One is terminal, one is not
|
||||
# u1_details == :true_terminal or u1_details == :false_terminal ->
|
||||
# {var2, y2, n2, d2} = u2_details
|
||||
# res_y = apply(op_name, op_lambda, u1_id, y2)
|
||||
# res_n = apply(op_name, op_lambda, u1_id, n2)
|
||||
# res_d = apply(op_name, op_lambda, u1_id, d2)
|
||||
# Store.find_or_create_node(var2, res_y, res_n, res_d)
|
||||
#
|
||||
# u2_details == :true_terminal or u2_details == :false_terminal ->
|
||||
# {var1, y1, n1, d1} = u1_details
|
||||
# res_y = apply(op_name, op_lambda, y1, u2_id)
|
||||
# res_n = apply(op_name, op_lambda, n1, u2_id)
|
||||
# res_d = apply(op_name, op_lambda, d1, u2_id)
|
||||
# Store.find_or_create_node(var1, res_y, res_n, res_d)
|
||||
#
|
||||
# # Both are non-terminals
|
||||
# true ->
|
||||
# {var1, y1, n1, d1} = u1_details
|
||||
# {var2, y2, n2, d2} = u2_details
|
||||
# # Select top variable based on global order
|
||||
# top_var = Enum.min([var1, var2])
|
||||
#
|
||||
# res_y =
|
||||
# apply(
|
||||
# op_name,
|
||||
# op_lambda,
|
||||
# if(var1 == top_var, do: y1, else: u1_id),
|
||||
# if(var2 == top_var, do: y2, else: u2_id)
|
||||
# )
|
||||
#
|
||||
# res_n =
|
||||
# apply(
|
||||
# op_name,
|
||||
# op_lambda,
|
||||
# if(var1 == top_var, do: n1, else: u1_id),
|
||||
# if(var2 == top_var, do: n2, else: u2_id)
|
||||
# )
|
||||
#
|
||||
# res_d =
|
||||
# apply(
|
||||
# op_name,
|
||||
# op_lambda,
|
||||
# if(var1 == top_var, do: d1, else: u1_id),
|
||||
# if(var2 == top_var, do: d2, else: u2_id)
|
||||
# )
|
||||
#
|
||||
# Store.find_or_create_node(top_var, res_y, res_n, res_d)
|
||||
# end
|
||||
# end
|
||||
# end
|
||||
#
|
||||
# # --- Unary Operation: Negation ---
|
||||
# @spec negate(non_neg_integer) :: non_neg_integer
|
||||
# def negate(tdd_id) do
|
||||
# cache_key = {:negate, tdd_id}
|
||||
#
|
||||
# case Store.get_op_cache(cache_key) do
|
||||
# {:ok, result_id} ->
|
||||
# result_id
|
||||
#
|
||||
# :not_found ->
|
||||
# result_id =
|
||||
# case Store.get_node(tdd_id) do
|
||||
# {:ok, :true_terminal} ->
|
||||
# Store.false_node_id()
|
||||
#
|
||||
# {:ok, :false_terminal} ->
|
||||
# Store.true_node_id()
|
||||
#
|
||||
# {:ok, {var, y, n, d}} ->
|
||||
# res_y = negate(y)
|
||||
# res_n = negate(n)
|
||||
# res_d = negate(d)
|
||||
# Store.find_or_create_node(var, res_y, res_n, res_d)
|
||||
# end
|
||||
#
|
||||
# Store.put_op_cache(cache_key, result_id)
|
||||
# result_id
|
||||
# end
|
||||
# end
|
||||
#
|
||||
# # --- Unary Operation: Semantic Simplification ---
|
||||
# @spec simplify(non_neg_integer(), map()) :: non_neg_integer()
|
||||
# def simplify(tdd_id, assumptions) do
|
||||
# sorted_assumptions = Enum.sort(assumptions)
|
||||
# cache_key = {:simplify, tdd_id, sorted_assumptions}
|
||||
#
|
||||
# case Store.get_op_cache(cache_key) do
|
||||
# {:ok, result_id} ->
|
||||
# result_id
|
||||
#
|
||||
# :not_found ->
|
||||
# result_id = do_simplify(tdd_id, assumptions)
|
||||
# Store.put_op_cache(cache_key, result_id)
|
||||
# result_id
|
||||
# end
|
||||
# end
|
||||
#
|
||||
# defp do_simplify(tdd_id, assumptions) do
|
||||
# # 1. Check if current path is contradictory.
|
||||
# if Engine.check(assumptions) == :contradiction do
|
||||
# Store.false_node_id()
|
||||
# else
|
||||
# case Store.get_node(tdd_id) do
|
||||
# # 2. Handle terminal nodes.
|
||||
# {:ok, :true_terminal} ->
|
||||
# Store.true_node_id()
|
||||
#
|
||||
# {:ok, :false_terminal} ->
|
||||
# Store.false_node_id()
|
||||
#
|
||||
# # 3. Handle non-terminal nodes.
|
||||
# {:ok, {var, y, n, d}} ->
|
||||
# # 4. Check if the variable is already constrained.
|
||||
# case Map.get(assumptions, var) do
|
||||
# true ->
|
||||
# simplify(y, assumptions)
|
||||
#
|
||||
# false ->
|
||||
# simplify(n, assumptions)
|
||||
#
|
||||
# :dc ->
|
||||
# simplify(d, assumptions)
|
||||
#
|
||||
# nil ->
|
||||
# # Not constrained, so we check for implied constraints.
|
||||
# # Note: This is an expensive part of the algorithm.
|
||||
# # (As noted, the recursive part of the check is still incomplete)
|
||||
# implies_true = Engine.check(Map.put(assumptions, var, false)) == :contradiction
|
||||
# implies_false = Engine.check(Map.put(assumptions, var, true)) == :contradiction
|
||||
#
|
||||
# cond do
|
||||
# # Should be caught by initial check
|
||||
# implies_true and implies_false ->
|
||||
# Store.false_node_id()
|
||||
#
|
||||
# implies_true ->
|
||||
# simplify(y, assumptions)
|
||||
#
|
||||
# implies_false ->
|
||||
# simplify(n, assumptions)
|
||||
#
|
||||
# true ->
|
||||
# # Recurse on all branches with new assumptions
|
||||
# s_y = simplify(y, Map.put(assumptions, var, true))
|
||||
# s_n = simplify(n, Map.put(assumptions, var, false))
|
||||
# s_d = simplify(d, Map.put(assumptions, var, :dc))
|
||||
# Store.find_or_create_node(var, s_y, s_n, s_d)
|
||||
# end
|
||||
# end
|
||||
# end
|
||||
# end
|
||||
# end
|
||||
# end
|
||||
defmodule Tdd.Algo do
|
||||
@moduledoc "Implements the core, stateless algorithms for TDD manipulation."
|
||||
alias Tdd.Store
|
||||
alias Tdd.Consistency.Engine
|
||||
|
||||
# --- Binary Operation: Apply ---
|
||||
@spec apply(atom, (atom, atom -> atom), non_neg_integer, non_neg_integer) :: non_neg_integer
|
||||
def apply(op_name, op_lambda, u1_id, u2_id) do
|
||||
# Memoization wrapper
|
||||
cache_key = {:apply, op_name, Enum.sort([u1_id, u2_id])}
|
||||
case Store.get_op_cache(cache_key) do
|
||||
{:ok, result_id} -> result_id
|
||||
:not_found ->
|
||||
result_id = do_apply(op_name, op_lambda, u1_id, u2_id)
|
||||
Store.put_op_cache(cache_key, result_id)
|
||||
result_id
|
||||
end
|
||||
end
|
||||
|
||||
defp do_apply(op_name, op_lambda, u1_id, u2_id) do
|
||||
with {:ok, u1_details} <- Store.get_node(u1_id),
|
||||
{:ok, u2_details} <- Store.get_node(u2_id) do
|
||||
cond do
|
||||
(u1_details == :true_terminal or u1_details == :false_terminal) and
|
||||
(u2_details == :true_terminal or u2_details == :false_terminal) ->
|
||||
if op_lambda.(u1_details, u2_details) == :true_terminal, do: Store.true_node_id(), else: Store.false_node_id()
|
||||
(u1_details == :true_terminal or u1_details == :false_terminal) ->
|
||||
{var2, y2, n2, d2} = u2_details
|
||||
Store.find_or_create_node(var2, apply(op_name, op_lambda, u1_id, y2), apply(op_name, op_lambda, u1_id, n2), apply(op_name, op_lambda, u1_id, d2))
|
||||
(u2_details == :true_terminal or u2_details == :false_terminal) ->
|
||||
{var1, y1, n1, d1} = u1_details
|
||||
Store.find_or_create_node(var1, apply(op_name, op_lambda, y1, u2_id), apply(op_name, op_lambda, n1, u2_id), apply(op_name, op_lambda, d1, u2_id))
|
||||
true ->
|
||||
{var1, y1, n1, d1} = u1_details; {var2, y2, n2, d2} = u2_details
|
||||
top_var = Enum.min([var1, var2])
|
||||
res_y = apply(op_name, op_lambda, if(var1 == top_var, do: y1, else: u1_id), if(var2 == top_var, do: y2, else: u2_id))
|
||||
res_n = apply(op_name, op_lambda, if(var1 == top_var, do: n1, else: u1_id), if(var2 == top_var, do: n2, else: u2_id))
|
||||
res_d = apply(op_name, op_lambda, if(var1 == top_var, do: d1, else: u1_id), if(var2 == top_var, do: d2, else: u2_id))
|
||||
Store.find_or_create_node(top_var, res_y, res_n, res_d)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# --- Unary Operation: Negation ---
|
||||
@spec negate(non_neg_integer) :: non_neg_integer
|
||||
def negate(tdd_id) do
|
||||
cache_key = {:negate, tdd_id}
|
||||
case Store.get_op_cache(cache_key) do
|
||||
{:ok, result_id} -> result_id
|
||||
:not_found ->
|
||||
result_id =
|
||||
case Store.get_node(tdd_id) do
|
||||
{:ok, :true_terminal} -> Store.false_node_id()
|
||||
{:ok, :false_terminal} -> Store.true_node_id()
|
||||
{:ok, {var, y, n, d}} -> Store.find_or_create_node(var, negate(y), negate(n), negate(d))
|
||||
end
|
||||
Store.put_op_cache(cache_key, result_id)
|
||||
result_id
|
||||
end
|
||||
end
|
||||
|
||||
# --- Unary Operation: Semantic Simplification ---
|
||||
@spec simplify(non_neg_integer(), map()) :: non_neg_integer
|
||||
def simplify(tdd_id, assumptions \\ %{}) do
|
||||
# Sort assumptions to ensure the cache key is canonical.
|
||||
sorted_assumptions = Enum.sort(assumptions)
|
||||
cache_key = {:simplify, tdd_id, sorted_assumptions}
|
||||
case Store.get_op_cache(cache_key) do
|
||||
{:ok, result_id} -> result_id
|
||||
:not_found ->
|
||||
result_id = do_simplify(tdd_id, assumptions)
|
||||
Store.put_op_cache(cache_key, result_id)
|
||||
result_id
|
||||
end
|
||||
end
|
||||
|
||||
defp do_simplify(tdd_id, assumptions) do
|
||||
# 1. Check if the current path is contradictory. If so, prune this branch.
|
||||
if Engine.check(assumptions) == :contradiction do
|
||||
Store.false_node_id()
|
||||
else
|
||||
case Store.get_node(tdd_id) do
|
||||
# 2. Terminal nodes are already simple.
|
||||
{:ok, :true_terminal} -> Store.true_node_id()
|
||||
{:ok, :false_terminal} -> Store.false_node_id()
|
||||
|
||||
# 3. Handle non-terminal nodes.
|
||||
{:ok, {var, y, n, d}} ->
|
||||
# 4. Check if the variable's value is already known or implied.
|
||||
case Map.get(assumptions, var) do
|
||||
true -> simplify(y, assumptions)
|
||||
false -> simplify(n, assumptions)
|
||||
:dc -> simplify(d, assumptions)
|
||||
nil ->
|
||||
# The variable is not explicitly constrained. Check for implied constraints.
|
||||
# Note: For now, the Engine only performs flat checks.
|
||||
implies_true = Engine.check(Map.put(assumptions, var, false)) == :contradiction
|
||||
implies_false = Engine.check(Map.put(assumptions, var, true)) == :contradiction
|
||||
|
||||
cond do
|
||||
implies_true and implies_false -> Store.false_node_id()
|
||||
implies_true -> simplify(y, assumptions)
|
||||
implies_false -> simplify(n, assumptions)
|
||||
true ->
|
||||
# No constraint, so recursively simplify all branches.
|
||||
s_y = simplify(y, Map.put(assumptions, var, true))
|
||||
s_n = simplify(n, Map.put(assumptions, var, false))
|
||||
s_d = simplify(d, Map.put(assumptions, var, :dc))
|
||||
Store.find_or_create_node(var, s_y, s_n, s_d)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
defmodule Tdd.TypeReconstructor do
|
||||
@moduledoc """
|
||||
Reconstructs a high-level `TypeSpec` from a low-level assumption map.
|
||||
@ -1096,7 +1089,95 @@ defmodule Tdd.TypeReconstructor do
|
||||
TypeSpec.normalize({:intersect, specs})
|
||||
end
|
||||
end
|
||||
defmodule Tdd.Compiler do
|
||||
@moduledoc "Compiles a `TypeSpec` into a canonical TDD ID."
|
||||
alias Tdd.TypeSpec
|
||||
alias Tdd.Variable
|
||||
alias Tdd.Store
|
||||
alias Tdd.Algo
|
||||
|
||||
@doc "The main entry point. Takes a spec and returns its TDD ID."
|
||||
@spec spec_to_id(TypeSpec.t()) :: non_neg_integer()
|
||||
def spec_to_id(spec) do
|
||||
# Memoization wrapper for the entire compilation process.
|
||||
normalized_spec = TypeSpec.normalize(spec)
|
||||
cache_key = {:spec_to_id, normalized_spec}
|
||||
|
||||
case Store.get_op_cache(cache_key) do
|
||||
{:ok, id} -> id
|
||||
:not_found ->
|
||||
id = do_spec_to_id(normalized_spec)
|
||||
Store.put_op_cache(cache_key, id)
|
||||
id
|
||||
end
|
||||
end
|
||||
|
||||
# The core compilation logic
|
||||
defp do_spec_to_id(spec) do
|
||||
raw_id =
|
||||
case spec do
|
||||
# --- Base Types ---
|
||||
:any -> Store.true_node_id()
|
||||
:none -> Store.false_node_id()
|
||||
:atom -> Store.find_or_create_node(Variable.v_is_atom(), Store.true_node_id(), Store.false_node_id(), Store.false_node_id())
|
||||
:integer -> Store.find_or_create_node(Variable.v_is_integer(), Store.true_node_id(), Store.false_node_id(), Store.false_node_id())
|
||||
# Add :list, :tuple etc. here. They are simple structural TDDs.
|
||||
:list -> Store.find_or_create_node(Variable.v_is_list(), Store.true_node_id(), Store.false_node_id(), Store.false_node_id())
|
||||
|
||||
# --- Literal Types ---
|
||||
{:literal, val} when is_atom(val) ->
|
||||
eq_node = Store.find_or_create_node(Variable.v_atom_eq(val), Store.true_node_id(), Store.false_node_id(), Store.false_node_id())
|
||||
Store.find_or_create_node(Variable.v_is_atom(), eq_node, Store.false_node_id(), Store.false_node_id())
|
||||
{:literal, val} when is_integer(val) ->
|
||||
eq_node = Store.find_or_create_node(Variable.v_int_eq(val), Store.true_node_id(), Store.false_node_id(), Store.false_node_id())
|
||||
Store.find_or_create_node(Variable.v_is_integer(), eq_node, Store.false_node_id(), Store.false_node_id())
|
||||
{:literal, []} ->
|
||||
empty_node = Store.find_or_create_node(Variable.v_list_is_empty(), Store.true_node_id(), Store.false_node_id(), Store.false_node_id())
|
||||
Store.find_or_create_node(Variable.v_is_list(), empty_node, Store.false_node_id(), Store.false_node_id())
|
||||
# Add other literals as needed
|
||||
|
||||
# --- Set-Theoretic Combinators ---
|
||||
{:union, specs} ->
|
||||
ids = Enum.map(specs, &spec_to_id/1)
|
||||
Enum.reduce(ids, Store.false_node_id(), fn id, acc ->
|
||||
Algo.apply(:sum, fn
|
||||
:true_terminal, _ -> :true_terminal
|
||||
_, :true_terminal -> :true_terminal
|
||||
:false_terminal, t2 -> t2
|
||||
t1, :false_terminal -> t1
|
||||
end, id, acc)
|
||||
end)
|
||||
|
||||
{:intersect, specs} ->
|
||||
ids = Enum.map(specs, &spec_to_id/1)
|
||||
Enum.reduce(ids, Store.true_node_id(), fn id, acc ->
|
||||
Algo.apply(:intersect, fn
|
||||
:false_terminal, _ -> :false_terminal
|
||||
_, :false_terminal -> :false_terminal
|
||||
:true_terminal, t2 -> t2
|
||||
t1, :true_terminal -> t1
|
||||
end, id, acc)
|
||||
end)
|
||||
|
||||
{:negation, sub_spec} ->
|
||||
Algo.negate(spec_to_id(sub_spec))
|
||||
|
||||
# --- Recursive Types (STUBS for now) ---
|
||||
# These will be implemented in Step 3
|
||||
{:list_of, _} -> raise "Tdd.Compiler: :list_of not yet implemented"
|
||||
{:tuple, _} -> raise "Tdd.Compiler: {:tuple, [...]} not yet implemented"
|
||||
{:cons, _, _} -> raise "Tdd.Compiler: :cons not yet implemented"
|
||||
|
||||
# --- Default ---
|
||||
_ ->
|
||||
raise "Tdd.Compiler: Cannot compile unknown spec: #{inspect(spec)}"
|
||||
end
|
||||
|
||||
# CRUCIAL: Every constructed TDD must be passed through simplify
|
||||
# to ensure it's in its canonical, semantically-reduced form.
|
||||
Algo.simplify(raw_id)
|
||||
end
|
||||
end
|
||||
####
|
||||
# xxx
|
||||
####
|
||||
@ -1921,10 +2002,80 @@ defmodule TypeReconstructorTests do
|
||||
end
|
||||
end
|
||||
end
|
||||
defmodule CompilerAlgoTests do
  @moduledoc """
  Integration tests exercising `Tdd.Compiler` together with `Tdd.Algo`.

  Each test compiles `TypeSpec`s to canonical TDD IDs and checks
  equivalence and subtyping laws (identity, annihilation, double
  negation, De Morgan). Results are printed to stdout and failures are
  accumulated in the process dictionary under `:test_failures`.
  """

  alias Tdd.Compiler
  alias Tdd.Store

  # High-level helpers that mimic the final API.

  # is_subtype(A, B) <=> (A & ~B) == none, checked on the compiled TDDs.
  defp is_subtype(spec1, spec2) do
    id1 = Compiler.spec_to_id(spec1)
    id2 = Compiler.spec_to_id(spec2)
    neg_id2 = Tdd.Algo.negate(id2)

    intersect_id =
      Tdd.Algo.apply(
        :intersect,
        &(&1 == :false_terminal or &2 == :false_terminal),
        id1,
        neg_id2
      )

    # Simplify to canonical form before comparing against the empty type.
    Tdd.Algo.simplify(intersect_id) == Store.false_node_id()
  end

  # Relies on spec_to_id/1 producing canonical IDs:
  # equal IDs <=> semantically equivalent types.
  defp are_equivalent(spec1, spec2) do
    Compiler.spec_to_id(spec1) == Compiler.spec_to_id(spec2)
  end

  defp test_subtype(name, expected, s1, s2), do: test(name, expected, is_subtype(s1, s2))
  defp test_equiv(name, expected, s1, s2), do: test(name, expected, are_equivalent(s1, s2))

  # Prints a PASS/FAIL line; on failure, shows the expected/actual values
  # and prepends the test name to the :test_failures accumulator.
  defp test(name, expected, actual) do
    is_ok = expected == actual
    status = if is_ok, do: "[PASS]", else: "[FAIL]"
    IO.puts("#{status} #{name}")

    unless is_ok do
      IO.puts(" Expected: #{inspect(expected)}")
      IO.puts(" Got: #{inspect(actual)}")
      Process.put(:test_failures, [name | Process.get(:test_failures, [])])
    end
  end

  @doc """
  Runs the full integration suite and prints a summary report.
  """
  def run() do
    IO.puts("\n--- Running Compiler & Algo Integration Tests ---")
    # Setup
    Tdd.Store.init()

    # BUG FIX: reset the failure accumulator. Several test modules run in
    # this same process and share the :test_failures dictionary key, so
    # without this reset an earlier suite's failures would leak into this
    # suite's final report (and inflate its failure count).
    Process.put(:test_failures, [])

    # --- Section: Basic Compilation & Equivalence ---
    test_equiv("atom & any == atom", true, {:intersect, [:atom, :any]}, :atom)
    test_equiv("atom | none == atom", true, {:union, [:atom, :none]}, :atom)
    test_equiv("atom & int == none", true, {:intersect, [:atom, :integer]}, :none)
    test_equiv("¬(¬atom) == atom", true, {:negation, {:negation, :atom}}, :atom)

    # --- Section: Subtyping Tests ---
    test_subtype(":foo <: atom", true, {:literal, :foo}, :atom)
    test_subtype("atom <: :foo", false, :atom, {:literal, :foo})
    test_subtype(":foo <: integer", false, {:literal, :foo}, :integer)
    test_subtype("int==5 <: integer", true, {:literal, 5}, :integer)
    test_subtype("none <: atom", true, :none, :atom)
    test_subtype("atom <: any", true, :atom, :any)

    # --- Section: De Morgan's Law Check ---
    # ¬(A | B) == (¬A & ¬B)
    spec_not_a_or_b = {:negation, {:union, [:atom, :integer]}}
    spec_not_a_and_not_b = {:intersect, [{:negation, :atom}, {:negation, :integer}]}
    test_equiv("De Morgan's Law holds", true, spec_not_a_or_b, spec_not_a_and_not_b)

    # --- Final Report ---
    failures = Process.get(:test_failures, [])

    if failures == [] do
      IO.puts("\n✅ All CompilerAlgoTests tests passed!")
    else
      IO.puts("\n❌ Found #{length(failures)} test failures.")
    end
  end
end
|
||||
# Top-level script section: execute each test suite in sequence.
# All suites run in the same process and share its process dictionary
# (e.g. the :test_failures key used by CompilerAlgoTests), so run order
# can affect the per-suite failure reports.
TypeSpecTests.run()
TddStoreTests.run()
TddVariableTests.run()
# NOTE(review): the next two suites are commented out — presumably
# disabled while their modules are incomplete; confirm this is intentional.
# TddAlgoTests.run()
# ConsistencyEngineTests.run()
TypeReconstructorTests.run()
CompilerAlgoTests.run()
|
||||
|
||||
284
refactor.md
Normal file
284
refactor.md
Normal file
@ -0,0 +1,284 @@
|
||||
|
||||
## Motivation for Architectural Evolution of the TDD-based Type System
|
||||
|
||||
**To:** Project Stakeholders & Technical Team
|
||||
**From:** System Architect
|
||||
**Date:** October 26, 2023
|
||||
**Subject:** A Principled Path Forward for Stability, Extensibility, and Polymorphism
|
||||
|
||||
### 1. Introduction & Current Success
|
||||
|
||||
The existing Ternary Decision Diagram (TDD) implementation has been remarkably successful in proving the core value of a set-theoretic approach to types. By representing types as canonical graphs, we have achieved powerful and efficient operations for union, intersection, negation, and subtyping. The system's ability to automatically simplify complex type expressions like `(atom | tuple) & (tuple | :foo)` into a canonical form `(tuple | :foo)` is a testament to the power of this model.
|
||||
|
||||
However, as we scale the system to represent more complex, real-world data structures—specifically recursive types like `list(X)`—a foundational architectural issue has been identified. This issue currently compromises the system's correctness and severely limits its future potential.
|
||||
|
||||
This document motivates a series of architectural changes designed to address this issue, creating a robust foundation for future growth, including the long-term goal of supporting polymorphism.
|
||||
|
||||
### 2. The Core Challenge: The Unstable Variable Problem
|
||||
|
||||
The correctness of an Ordered TDD relies on a **fixed, global, total order of all predicate variables**. Our current implementation for recursive types like `list_of(X)` violates this principle.
|
||||
|
||||
- **The Flaw:** We currently construct predicate variables by embedding the integer TDD ID of the element type `X`. For instance, `list_of(atom)` might use the variable `{5, :all_elements, 4}` if `type_atom()` resolves to TDD ID `4`.
|
||||
- **The Consequence:** TDD IDs are artifacts of construction order; they are not stable. If `type_integer()` is created before `type_atom()`, their IDs will swap, and the global variable order will change. This means that **semantically identical types can produce structurally different TDDs**, breaking the canonical representation that is the central promise of our system. This is not a theoretical concern; it is a critical bug that invalidates equivalence checks and subtyping operations under different run conditions.
|
||||
|
||||
This instability is a symptom of a deeper issue: we are conflating a type's **logical description** with its **compiled, set-theoretic representation**.
|
||||
|
||||
### 3. The Proposed Solution: A Separation of Concerns
|
||||
|
||||
To solve this, we will introduce a principled separation between the "what" and the "how" of our types.
|
||||
|
||||
1. **`TypeSpec`: The Logical "What"**
|
||||
We will introduce a new, stable, declarative data structure called a `TypeSpec`. A `TypeSpec` is a structural description of a type (e.g., `{:list_of, :atom}`, `{:union, [:integer, :atom]}`). This becomes the **new public API and canonical language** for defining types. It is human-readable, stable, and completely independent of the TDD implementation.
|
||||
|
||||
2. **TDD ID: The Compiled "How"**
|
||||
The TDD graph and its ID will be treated as a **compiled artifact**. It is the powerful, efficient, set-theoretic representation of a concrete `TypeSpec`. The `Tdd` module will evolve into a low-level "backend" or "compiler engine."
|
||||
|
||||
This separation yields immediate and long-term benefits:
|
||||
|
||||
- **Guaranteed Correctness & Stability:** By using the stable `TypeSpec` within our TDD variables (e.g., `{5, :all_elements, {:base, :atom}}`), we restore a fixed global variable order. This guarantees that our TDDs are once again truly canonical, ensuring the reliability of all type operations.
|
||||
- **Clean Architecture & API:** The public-facing API will be vastly improved, operating on clear, declarative `TypeSpec`s instead of opaque integer IDs. The internal complexity of the TDD machinery is encapsulated behind a new `Compiler` module, improving maintainability.
|
||||
- **Unlocking Future Capabilities:** This architecture is not just a bug fix; it is an enabling refactor. A system that "compiles" a `TypeSpec` to a TDD is naturally extensible to polymorphism. The `TypeSpec` for `list(A)` can be represented as `{:list_of, {:type_var, :A}}`. A higher-level type inference engine can then operate on these specs, substituting variables and using our TDD compiler for concrete subtype checks. This provides a clear, principled path to our goal of supporting polymorphic functions and data structures.
|
||||
|
||||
### 4. Phased Implementation Plan
|
||||
|
||||
We will roll out these changes in three deliberate phases to manage complexity and deliver value incrementally.
|
||||
|
||||
- **Phase 1: Stabilize the Core.** Introduce `TypeSpec` and the `Compiler` to fix the unstable variable problem. This is the highest priority, as it ensures the correctness of our existing feature set.
|
||||
- **Phase 2: Refine the Public API.** Build a new, high-level `TypeSystem` module that operates exclusively on `TypeSpec`s, providing a clean and intuitive interface for users of our library.
|
||||
- **Phase 3: Introduce Polymorphism.** With the stable foundation in place, implement the unification and substitution logic required to handle `TypeSpec`s containing type variables, enabling the checking of polymorphic function schemas.
|
||||
|
||||
### 5. Conclusion
|
||||
|
||||
The current TDD system is a powerful proof of concept that is on the verge of becoming a robust, general-purpose tool. By addressing the foundational "unstable variable" problem through a principled separation of a type's specification from its implementation, we not only fix a critical bug but also pave a clear and logical path toward our most ambitious goals. This architectural evolution is a necessary investment in the long-term stability, correctness, and extensibility of our type system.
|
||||
|
||||
|
||||
### Phase 1: Fix the Core Instability & Introduce `TypeSpec`
|
||||
|
||||
This phase is non-negotiable. It makes your current system sound and stable.
|
||||
|
||||
**1. Introduce the `TypeSpec` Data Structure:**
|
||||
This will be the new "source of truth" for defining types. It's a stable, structural representation. This becomes the primary vocabulary for your public API.
|
||||
|
||||
```elixir
|
||||
# In a new file, e.g., typespec.ex
|
||||
defmodule Tdd.TypeSpec do
|
||||
@typedoc """
|
||||
A stable, structural representation of a type.
|
||||
This is the primary way users and the higher-level system will describe types.
|
||||
"""
|
||||
@type t ::
|
||||
# Base Types
|
||||
:any
|
||||
| :none
|
||||
| :atom
|
||||
| :integer
|
||||
| :tuple # any tuple
|
||||
| :list # any list
|
||||
|
||||
# Literal Types (canonical form of a value)
|
||||
| {:literal, term()}
|
||||
|
||||
# Set-theoretic Combinators
|
||||
| {:union, [t()]}
|
||||
| {:intersect, [t()]}
|
||||
| {:negation, t()}
|
||||
|
||||
# Parameterized Structural Types
|
||||
| {:tuple, [t()]} # A tuple with specific element types
|
||||
| {:cons, t(), t()} # A non-empty list [H|T]
|
||||
| {:list_of, t()} # A list of any length where elements are of a type
|
||||
|
||||
# Integer Range (example of specific properties)
|
||||
| {:integer_range, integer() | :neg_inf, integer() | :pos_inf}
|
||||
|
||||
# Polymorphic Variable (for Phase 3)
|
||||
| {:type_var, atom()}
|
||||
end
|
||||
```
|
||||
|
||||
**2. Create a "Compiler": `Tdd.Compiler.spec_to_id/1`**
|
||||
This is the new heart of your system. It's a memoized function that takes a `TypeSpec` and returns a canonical TDD ID. Your existing `Tdd` module becomes the "backend" for this compiler.
|
||||
|
||||
```elixir
|
||||
# In a new file, e.g., compiler.ex
|
||||
defmodule Tdd.Compiler do
|
||||
# This module manages the cache from spec -> id
|
||||
# Process.put(:spec_to_id_cache, %{})
|
||||
|
||||
def spec_to_id(spec) do
|
||||
# 1. Normalize the spec (e.g., sort unions) to improve cache hits.
|
||||
normalized_spec = Tdd.TypeSpec.normalize(spec)
|
||||
|
||||
# 2. Check cache for `normalized_spec`. If found, return ID.
|
||||
|
||||
# 3. If not found, compile it by calling private helpers.
|
||||
id =
|
||||
case normalized_spec do
|
||||
:atom -> Tdd.type_atom()
|
||||
:integer -> Tdd.type_integer()
|
||||
{:literal, val} -> build_literal_tdd(val)
|
||||
{:union, specs} ->
|
||||
ids = Enum.map(specs, &spec_to_id/1)
|
||||
Enum.reduce(ids, Tdd.type_none(), &Tdd.sum/2)
|
||||
# ... and so on ...
|
||||
{:list_of, element_spec} -> build_list_of_tdd(element_spec)
|
||||
end
|
||||
|
||||
# 4. Cache and return the new ID.
|
||||
id
|
||||
end
|
||||
|
||||
# The key fix is here:
|
||||
defp build_list_of_tdd(element_spec) do
|
||||
# A) Compile the element spec to get its ID for semantic checks.
|
||||
element_id = spec_to_id(element_spec)
|
||||
|
||||
# B) Create the TDD variable using the STABLE element_spec.
|
||||
# This is the critical change.
|
||||
stable_var = Tdd.v_list_all_elements_are(element_spec)
|
||||
|
||||
# C) Use Tdd module to build the node, passing both the stable
|
||||
# variable and the compiled ID for the consistency checker.
|
||||
# (This requires a small change in the Tdd module).
|
||||
Tdd.type_list_of_internal(stable_var, element_id)
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
**3. Modify the `Tdd` Module to be a "Low-Level Backend":**
|
||||
The public API of `Tdd` shrinks. Its main users are now `Tdd.Compiler` and `Tdd.TypeSystem` (Phase 2).
|
||||
|
||||
* **Change Variable Constructors:** `v_list_all_elements_are` now takes a `TypeSpec`.
|
||||
```elixir
|
||||
# In Tdd module
|
||||
# The variable now contains the stable TypeSpec, not a volatile ID.
|
||||
def v_list_all_elements_are(element_spec), do: {5, :all_elements, element_spec}
|
||||
```
|
||||
|
||||
* **Update `check_assumptions_consistency`:** This function now needs access to the compiler to handle the new variable format.
|
||||
```elixir
|
||||
# In Tdd module, inside the consistency checker
|
||||
# ... when it encounters a `v_list_all_elements_are` assumption...
|
||||
{{5, :all_elements, element_spec}, true} ->
|
||||
# It needs to know the TDD ID for this spec to do subtyping checks.
|
||||
ambient_type_for_head = Tdd.Compiler.spec_to_id(element_spec)
|
||||
# ... rest of the logic proceeds as before ...
|
||||
```
|
||||
This creates a circular dependency (`Compiler` -> `Tdd` -> `Compiler`), which is a sign of deep coupling. It's acceptable for now, but a sign that this logic might eventually belong in the compiler itself.
|
||||
|
||||
* **Refactor Type Constructors:** Your old `type_list_of`, `type_tuple`, etc., are either removed or renamed to `_internal` and used by the new `Compiler`.
|
||||
|
||||
---
|
||||
|
||||
### Phase 2: Build the High-Level Type System API
|
||||
|
||||
This is the new public-facing interface. It operates on `TypeSpec`s and uses the `Compiler` and `Tdd` modules as its engine.
|
||||
|
||||
```elixir
|
||||
# In a new file, e.g., type_system.ex
|
||||
defmodule TypeSystem do
|
||||
alias Tdd.TypeSpec, as: Spec
|
||||
|
||||
# --- Public Type Constructors (using TypeSpecs) ---
|
||||
def type_atom, do: :atom
|
||||
def type_integer, do: :integer
|
||||
def type_union(specs), do: {:union, specs}
|
||||
def type_list_of(spec), do: {:list_of, spec}
|
||||
# ... and so on ...
|
||||
|
||||
# --- Public Operations ---
|
||||
@doc """
|
||||
Checks if spec1 is a subtype of spec2.
|
||||
"""
|
||||
def is_subtype(spec1, spec2) do
|
||||
# 1. Compile both specs to their canonical TDD IDs.
|
||||
id1 = Tdd.Compiler.spec_to_id(spec1)
|
||||
id2 = Tdd.Compiler.spec_to_id(spec2)
|
||||
|
||||
# 2. Use the fast, low-level TDD check.
|
||||
Tdd.is_subtype(id1, id2)
|
||||
end
|
||||
|
||||
def intersect(spec1, spec2) do
|
||||
# This is a choice:
|
||||
# Option A (easy): Just return {:intersect, [spec1, spec2]} and let the compiler
|
||||
# handle it lazily when `spec_to_id` is called.
|
||||
# Option B (better): Do some immediate simplifications here before compiling.
|
||||
# e.g., intersect(:atom, :integer) -> :none
|
||||
{:intersect, [spec1, spec2]}
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
**At the end of Phase 2, you have a sound, stable, and extensible set-theoretic type checker.** The "god function" problem still exists in `check_assumptions_consistency`, but the system is no longer fundamentally broken.
|
||||
|
||||
---
|
||||
|
||||
### Phase 3: Introduce Polymorphism
|
||||
|
||||
Now you leverage the `TypeSpec` architecture.
|
||||
|
||||
**1. Define Polymorphic Function Schemas:**
|
||||
These are data structures, living outside the TDD world.
|
||||
|
||||
```elixir
|
||||
# A function schema
|
||||
@type fun_schema :: {:function,
|
||||
forall: [atom()], # list of quantified type variables
|
||||
args: [Spec.t()], # argument types (can use quantified vars)
|
||||
return: Spec.t() # return type (can use quantified vars)
|
||||
}
|
||||
|
||||
# Example: Enum.map/2
|
||||
# forall A, B. (list(A), (A -> B)) -> list(B)
|
||||
map_schema = {:function,
|
||||
forall: [:A, :B],
|
||||
args: [
|
||||
{:list_of, {:type_var, :A}},
|
||||
{:function, forall: [], args: [{:type_var, :A}], return: {:type_var, :B}}
|
||||
],
|
||||
return: {:list_of, {:type_var, :B}}
|
||||
}
|
||||
```
|
||||
|
||||
**2. Implement a Unification/Substitution Engine:**
|
||||
This is a new, separate module that operates *only on `TypeSpec`s and schemas*.
|
||||
|
||||
```elixir
|
||||
defmodule Type.Unify do
|
||||
# Takes a spec with variables and a concrete spec, returns a substitution map
|
||||
# or :error.
|
||||
# unify({:list_of, {:type_var, :A}}, {:list_of, :integer}) -> {:ok, %{A: :integer}}
|
||||
def unify(polymorphic_spec, concrete_spec) do
|
||||
# ... recursive pattern matching logic ...
|
||||
end
|
||||
|
||||
# Applies a substitution map to a spec.
|
||||
# substitute({:list_of, {:type_var, :A}}, %{A: :integer}) -> {:list_of, :integer}
|
||||
def substitute(spec, substitution_map) do
|
||||
# ... recursive substitution logic ...
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
**3. Build the Type Checker for Function Calls:**
|
||||
This is the final piece that ties everything together.
|
||||
|
||||
```elixir
|
||||
# In TypeSystem module
|
||||
def check_function_call(function_schema, concrete_arg_specs) do
|
||||
# 1. Unify the schema's arg specs with the concrete arg specs.
|
||||
# This will produce a substitution map, e.g., %{A: :integer, B: :atom}.
|
||||
{:ok, substitution_map} = Unify.unify(function_schema.args, concrete_arg_specs)
|
||||
|
||||
# 2. Check that the concrete args are valid subtypes.
|
||||
# (This step is actually part of unification itself).
|
||||
|
||||
# 3. Infer the concrete return type by substituting into the schema's return spec.
|
||||
concrete_return_spec = Unify.substitute(function_schema.return, substitution_map)
|
||||
|
||||
{:ok, concrete_return_spec}
|
||||
end
|
||||
```
|
||||
|
||||
This phased approach solves your immediate problem correctly, provides a clean public API, and builds the exact foundation needed to add a polymorphic layer on top without requiring another major architectural upheaval.
|
||||
Loading…
x
Reference in New Issue
Block a user