Work-in-progress commit — I am not confident in this approach; revert to the previous commit if something goes wrong.

This commit is contained in:
Kacper Marzecki 2025-06-29 23:47:12 +02:00
parent 5972da6ecc
commit 90b063eb30
2 changed files with 499 additions and 107 deletions

View File

@ -511,4 +511,44 @@ defmodule Tdd.Debug do
end
end
end
@doc """
Logs a value with a label using `IO.inspect/2`, then returns the value
unchanged so the call can sit in the middle of a pipeline.

Accepts any `IO.inspect/2` options in `opts`; caller options override the
defaults (`width: 120, pretty: true`).
"""
def log(value, label, opts \\ []) do
  # The original wrapped this in a vacuous `if true do` (a placeholder for a
  # global logging switch); the dead conditional is removed here. A real
  # toggle should come from application config, not a hard-coded literal.
  default_opts = [width: 120, pretty: true]
  final_opts = Keyword.merge(default_opts, opts)
  IO.inspect(value, [{:label, "[DEBUG] #{label}"} | final_opts])
  # Return the original value to allow piping.
  value
end
# Returns the indentation prefix for the current recursion depth, then
# increments the process-local depth counter (`:debug_depth` in the process
# dictionary). Used by log_entry/1 to indent nested trace output.
#
# NOTE: this was previously annotated with `@doc`, but `@doc` on a private
# function is always discarded and triggers a compiler warning, so a plain
# comment is used instead.
defp get_depth() do
  depth = Process.get(:debug_depth, 0)
  Process.put(:debug_depth, depth + 1)
  String.duplicate("  ", depth)
end
# Decrements the process-local recursion-depth counter, clamping at zero so
# unbalanced exit logging can never drive the depth negative.
#
# NOTE: `@doc` is always discarded on private functions (with a compiler
# warning), so this is documented with a plain comment. The previous
# `|> Kernel.-(1) |> max(0)` pipe is rewritten as a plain expression.
defp dec_depth() do
  depth = max(Process.get(:debug_depth, 1) - 1, 0)
  Process.put(:debug_depth, depth)
end
@doc """
Logs an entry message (`>> label`) indented to the current recursion depth.

Each call increases the depth by one (via `get_depth/0`), so a matching
`log_exit/2` should be called on the way out.
"""
def log_entry(label) do
  # get_depth/0 returns the indentation for the current level and bumps the
  # counter for nested calls. The stray `IO.inspect(prefix, label: "PREFIX")`
  # debugging-the-debugger noise from the original is removed.
  prefix = get_depth()
  IO.puts("#{prefix}[DEBUG] >> #{label}")
end
@doc """
Logs a return value (`<< label`) indented to the current recursion depth,
then returns the value unchanged for piping.
"""
def log_exit(value, label) do
  # Pop one level off the depth counter first so this exit line lines up
  # with its matching log_entry/1 line.
  dec_depth()
  indent = String.duplicate("  ", Process.get(:debug_depth, 0))
  IO.inspect(value, label: indent <> "[DEBUG] << #{label}")
  # Hand the value straight back to the caller.
  value
end
end

562
new.exs
View File

@ -184,29 +184,42 @@ defmodule Tdd.TypeSpec do
{:intersect, members} ->
normalize_intersection_pass1(members, env, counter)
{:list_of, element_spec} ->
{normalized_element, counter_after_element} =
normalize_pass1(element_spec, env, counter)
# THIS IS THE CANONICAL FIX.
{:list_of, element_spec} ->
# We transform `list_of(E)` into a `mu` expression.
# This expression will then be normalized by a recursive call.
internal_rec_var_name = :"$ListOfInternalRecVar_Pass1$"
# First, normalize the element's spec.
{normalized_element, counter_after_element} =
normalize_pass1(element_spec, env, counter)
list_body =
{:union,
[
{:literal, []},
{:cons, normalized_element, {:type_var, internal_rec_var_name}}
]}
# Create a *temporary, non-canonical* name for the recursive variable.
# The subsequent `normalize_pass1` call on the `mu` form will perform
# the proper, canonical renaming.
temp_rec_var = :"$list_of_rec_var"
normalize_pass1({:mu, internal_rec_var_name, list_body}, env, counter_after_element)
list_body =
{:union,
[
{:literal, []},
{:cons, normalized_element, {:type_var, temp_rec_var}}
]}
{:mu, var_name, body} ->
fresh_temp_name = fresh_var_name(:p1_m_var, counter)
body_env = Map.put(env, var_name, {:type_var, fresh_temp_name})
# Now, normalize the full mu-expression. This is the crucial step.
# It will handle alpha-conversion of `temp_rec_var` and normalization
# of the body's components.
normalize_pass1({:mu, temp_rec_var, list_body}, env, counter_after_element)
{normalized_body, next_counter_after_body} =
normalize_pass1(body, body_env, counter + 1)
# ... ensure the other cases are correct ...
{{:mu, fresh_temp_name, normalized_body}, next_counter_after_body}
{:mu, var_name, body} ->
# This logic is correct. It creates a fresh canonical name and
# adds it to the environment for normalizing the body.
fresh_temp_name = fresh_var_name(:p1_m_var, counter)
body_env = Map.put(env, var_name, {:type_var, fresh_temp_name})
{normalized_body, next_counter_after_body} =
normalize_pass1(body, body_env, counter + 1)
{{:mu, fresh_temp_name, normalized_body}, next_counter_after_body}
{:type_lambda, param_names, body} ->
{reversed_fresh_temp_names, next_counter_after_params, body_env} =
@ -682,34 +695,30 @@ defmodule Tdd.TypeSpec do
e2 = extract_list_mu_element(b2_body, v2)
do_is_subtype_structural?(e1, e2, new_visited)
# Canonical vars are the same, implies types were structurally identical before renaming, or one unfolds to other
# The key insight: for structural subtyping before TDDs,
# we only consider mu-types equivalent if their canonical forms are identical.
# My previous fix was too aggressive and belongs in a TDD-based check.
v1 == v2 ->
# spec1 is the mu-type itself (μv1.b1_body)
subst_map1 = %{v1 => spec1}
# spec2 is the mu-type itself (μv2.b2_body)
subst_map2 = %{v2 => spec2}
unfolded_b1 = substitute_vars_canonical(b1_body, subst_map1)
unfolded_b2 = substitute_vars_canonical(b2_body, subst_map2)
do_is_subtype_structural?(unfolded_b1, unfolded_b2, new_visited)
# They are alpha-equivalent
true
# Different canonical mu-variables, not list forms
true ->
# This path is taken for Mu_B_final vs Mu_C_final if spec1/spec2 were Mu_B_final/Mu_C_final *before* normalization.
# If they were normalized, they'd be identical if equivalent.
# If is_subtype? is called with already-canonical forms Mu_B_final and Mu_C_final,
# then v1 = :m_var1, v2 = :m_var2. So this `true` branch is taken.
# spec1 is the mu-type itself (μv1.b1_body)
# subst_map1 = %{v1 => spec1}
# # spec2 is the mu-type itself (μv2.b2_body)
# subst_map2 = %{v2 => spec2}
# unfolded_b1 = substitute_vars_canonical(b1_body, subst_map1)
# unfolded_b2 = substitute_vars_canonical(b2_body, subst_map2)
# do_is_subtype_structural?(unfolded_b1, unfolded_b2, new_visited)
# Different canonical variables means different types at this stage.
false
end
{{:mu, v_s1, body_s1}, :list} ->
is_list_mu_form(body_s1, v_s1)
# ADD THIS CASE: A concrete type vs. a recursive type.
# This is critical for checking if an instance belongs to a recursive type.
{_non_mu_spec, {:mu, v2, b2_body} = mu_spec2} ->
# Unfold the mu-type once and re-check.
unfolded_b2 = substitute_vars_canonical(b2_body, %{v2 => mu_spec2})
do_is_subtype_structural?(spec1, unfolded_b2, new_visited)
# ADD THIS CASE: A recursive type vs. a concrete type (usually false).
{{:mu, _, _}, _non_mu_spec} ->
# A recursive type (potentially infinite set) cannot be a subtype
# of a non-recursive, finite type (unless it's :any, which is already handled).
false
{{:negation, n_body1}, {:negation, n_body2}} ->
do_is_subtype_structural?(n_body2, n_body1, new_visited)
@ -1417,7 +1426,7 @@ defmodule Tdd.Algo do
use Tdd.Debug
alias Tdd.Store
alias Tdd.Consistency.Engine
alias Tdd.Debug
# --- Binary Operation: Apply ---
@spec apply(atom, (atom, atom -> atom), non_neg_integer, non_neg_integer) :: non_neg_integer
def apply(op_name, op_lambda, u1_id, u2_id) do
@ -1571,73 +1580,137 @@ defmodule Tdd.Algo do
# The private helper now takes a `context` to detect infinite recursion.
# The private helper now takes a `context` to detect infinite recursion.
defp do_simplify(tdd_id, sorted_assumptions, context) do
Debug.log_entry("do_simplify(#{tdd_id}, #{inspect(sorted_assumptions)})")
current_state = {tdd_id, sorted_assumptions}
# Coinductive base case: if we have seen this exact state before in this
# call stack, we are in a loop.
if MapSet.member?(context, current_state) do
Debug.log("LOOP DETECTED", "do_simplify: Coinductive Case")
Store.true_node_id()
|> Debug.log_exit("do_simplify RETURNING from loop")
else
new_context = MapSet.put(context, current_state)
assumptions = Map.new(sorted_assumptions)
# If the assumptions themselves are contradictory, the result is `none`.
if Engine.check(assumptions) == :contradiction do
Store.false_node_id()
Debug.log(assumptions, "do_simplify: Assumptions are contradictory")
Debug.log_exit(Store.false_node_id(), "do_simplify RETURNING from contradiction")
else
case Store.get_node(tdd_id) do
{:ok, :true_terminal} ->
Store.true_node_id()
Debug.log_exit(Store.true_node_id(), "do_simplify RETURNING from true_terminal")
{:ok, :false_terminal} ->
Store.false_node_id()
Debug.log_exit(Store.false_node_id(), "do_simplify RETURNING from false_terminal")
{:ok, {var, y, n, d}} ->
# Check if the variable's value is already explicitly known.
case Map.get(assumptions, var) do
true ->
do_simplify(y, sorted_assumptions, new_context)
Debug.log(var, "do_simplify: Node variable")
false ->
do_simplify(n, sorted_assumptions, new_context)
# NEW: Add a pattern match here to detect recursive variables
case var do
# This variable means "the head must satisfy the type represented by NESTED_VAR"
{5, :c_head, nested_var, _} ->
handle_recursive_subproblem(
:simplify,
:head,
nested_var,
{var, y, n, d},
sorted_assumptions,
context
)
:dc ->
do_simplify(d, sorted_assumptions, new_context)
# This variable means "the tail must satisfy the type represented by NESTED_VAR"
{5, :d_tail, nested_var, _} ->
handle_recursive_subproblem(
:simplify,
:tail,
nested_var,
{var, y, n, d},
sorted_assumptions,
context
)
# The variable's value is not explicitly known.
# Check if the context of assumptions *implies* its value.
nil ->
assumptions_imply_true =
Engine.check(Map.put(assumptions, var, false)) == :contradiction
# This variable means "element `index` must satisfy NESTED_VAR"
{4, :b_element, index, nested_var} ->
handle_recursive_subproblem(
:simplify,
{:elem, index},
nested_var,
{var, y, n, d},
sorted_assumptions,
context
)
assumptions_imply_false =
Engine.check(Map.put(assumptions, var, true)) == :contradiction
cond do
# This case should be caught by the top-level Engine.check, but is here for safety.
assumptions_imply_true and assumptions_imply_false ->
Store.false_node_id()
# If the context implies `var` must be true, we follow the 'yes' path.
# We do NOT add `{var, true}` to the assumptions for the recursive call,
# as that would cause the assumption set to grow infinitely and break
# the coinductive check. The original `assumptions` set already
# contains all the necessary information for the engine to work.
assumptions_imply_true ->
do_simplify(y, sorted_assumptions, new_context)
# Likewise, if the context implies `var` must be false, follow the 'no' path
# with the original, unmodified assumptions.
assumptions_imply_false ->
do_simplify(n, sorted_assumptions, new_context)
# The variable is truly independent. We must simplify all branches,
# adding the new assumption for each respective path, and reconstruct the node.
_ ->
# Check if the variable's value is already explicitly known.
case Map.get(assumptions, var) do
true ->
s_y = do_simplify(y, Enum.sort(Map.put(assumptions, var, true)), new_context)
s_n = do_simplify(n, Enum.sort(Map.put(assumptions, var, false)), new_context)
s_d = do_simplify(d, Enum.sort(Map.put(assumptions, var, :dc)), new_context)
Store.find_or_create_node(var, s_y, s_n, s_d)
do_simplify(y, sorted_assumptions, new_context)
|> Debug.log_exit("do_simplify RETURNING from known var (true)")
false ->
do_simplify(n, sorted_assumptions, new_context)
|> Debug.log_exit("do_simplify RETURNING from known var (false)")
:dc ->
do_simplify(d, sorted_assumptions, new_context)
|> Debug.log_exit("do_simplify RETURNING from known var (dc)")
# The variable's value is not explicitly known.
# Check if the context of assumptions *implies* its value.
nil ->
Debug.log(nil, "do_simplify: Var not known, checking implications")
assumptions_imply_true =
Engine.check(Map.put(assumptions, var, false)) ==
:contradiction
|> Debug.log("do_simplify: assumptions_imply_true?")
assumptions_imply_false =
Engine.check(Map.put(assumptions, var, true)) ==
:contradiction
|> Debug.log("do_simplify: assumptions_imply_false?")
cond do
# This case should be caught by the top-level Engine.check, but is here for safety.
assumptions_imply_true and assumptions_imply_false ->
Store.false_node_id()
# If the context implies `var` must be true, we follow the 'yes' path.
# We do NOT add `{var, true}` to the assumptions for the recursive call,
# as that would cause the assumption set to grow infinitely and break
# the coinductive check. The original `assumptions` set already
# contains all the necessary information for the engine to work.
assumptions_imply_true ->
do_simplify(y, Enum.sort(Map.put(assumptions, var, true)), new_context)
|> Debug.log_exit("do_simplify RETURNING from implied var (true)")
# Likewise, if the context implies `var` must be false, follow the 'no' path
# with the original, unmodified assumptions.
assumptions_imply_false ->
do_simplify(n, Enum.sort(Map.put(assumptions, var, false)), new_context)
|> Debug.log_exit("do_simplify RETURNING from implied var (false)")
# The variable is truly independent. We must simplify all branches,
# adding the new assumption for each respective path, and reconstruct the node.
true ->
Debug.log("independent", "do_simplify: Var is independent, branching")
s_y =
do_simplify(y, Enum.sort(Map.put(assumptions, var, true)), new_context)
s_n =
do_simplify(n, Enum.sort(Map.put(assumptions, var, false)), new_context)
s_d =
do_simplify(d, Enum.sort(Map.put(assumptions, var, :dc)), new_context)
Store.find_or_create_node(var, s_y, s_n, s_d)
|> Debug.log_exit("do_simplify RETURNING from branch reconstruction")
end
end
end
end
@ -1763,6 +1836,203 @@ defmodule Tdd.Algo do
# end
# end
# end
@doc """
Checks if a TDD is coinductively empty.

This is used for subtyping checks. Returns `0` if empty, or a non-zero ID
otherwise. Results are memoized in the operation cache under the key
`{:check_emptiness, tdd_id}`.
"""
@spec check_emptiness(non_neg_integer()) :: non_neg_integer()
def check_emptiness(tdd_id) do
  key = {:check_emptiness, tdd_id}

  with :not_found <- Store.get_op_cache(key) do
    # Cache miss: run the recursive check starting from no assumptions.
    # The assumptions travel as a sorted list (trivially empty here) so the
    # recursion's internal cache keys stay canonical.
    result = do_check_emptiness(tdd_id, [], MapSet.new())
    Store.put_op_cache(key, result)
    result
  else
    {:ok, cached} -> cached
  end
end
# Recursive helper for the coinductive emptiness check.
# Structurally almost identical to do_simplify, but with the OPPOSITE
# coinductive rule: revisiting a state means no finite counterexample was
# found on that path, so the path is judged empty (false) rather than
# vacuously true.
defp do_check_emptiness(tdd_id, sorted_assumptions, context) do
  # `sorted_assumptions` stays a sorted list so `current_state` is a
  # canonical key for the loop-detection set.
  current_state = {tdd_id, sorted_assumptions}
  # Coinductive base case for EMPTINESS
  if MapSet.member?(context, current_state) do
    # A loop means no finite counterexample was found on this path.
    # So, for the emptiness check, this path is considered empty.
    Store.false_node_id()
  else
    new_context = MapSet.put(context, current_state)
    assumptions = Map.new(sorted_assumptions)
    # Contradictory assumptions describe no value at all: empty.
    if Engine.check(assumptions) == :contradiction do
      Store.false_node_id()
    else
      case Store.get_node(tdd_id) do
        {:ok, :true_terminal} ->
          Store.true_node_id()
        {:ok, :false_terminal} ->
          Store.false_node_id()
        {:ok, {var, y, n, d}} ->
          case var do
            # Recursive sub-problem variables: the list head, list tail, or a
            # tuple element must satisfy a nested spec. Delegate to the shared
            # handler, tagged :check_emptiness so it recurses back into this
            # function rather than do_simplify.
            {5, :c_head, nested_var, _} ->
              handle_recursive_subproblem(
                :check_emptiness,
                :head,
                nested_var,
                {var, y, n, d},
                sorted_assumptions,
                context
              )
            {5, :d_tail, nested_var, _} ->
              handle_recursive_subproblem(
                :check_emptiness,
                :tail,
                nested_var,
                {var, y, n, d},
                sorted_assumptions,
                context
              )
            {4, :b_element, index, nested_var} ->
              handle_recursive_subproblem(
                :check_emptiness,
                {:elem, index},
                nested_var,
                {var, y, n, d},
                sorted_assumptions,
                context
              )
            _ ->
              # Ordinary predicate variable: branch on what the assumption
              # set already says about it.
              case Map.get(assumptions, var) do
                true ->
                  do_check_emptiness(y, sorted_assumptions, new_context)
                false ->
                  do_check_emptiness(n, sorted_assumptions, new_context)
                :dc ->
                  do_check_emptiness(d, sorted_assumptions, new_context)
                nil ->
                  # Value not explicitly known; ask the engine whether the
                  # assumptions force it in either direction (a forced value
                  # shows up as a contradiction on the opposite choice).
                  assumptions_imply_true =
                    Engine.check(Map.put(assumptions, var, false)) == :contradiction
                  assumptions_imply_false =
                    Engine.check(Map.put(assumptions, var, true)) == :contradiction
                  cond do
                    # Both directions contradictory: unsatisfiable, hence empty.
                    assumptions_imply_true and assumptions_imply_false ->
                      Store.false_node_id()
                    # Forced true: follow the YES branch with the assumption recorded.
                    assumptions_imply_true ->
                      do_check_emptiness(
                        y,
                        Enum.sort(Map.put(assumptions, var, true)),
                        new_context
                      )
                    # Forced false: follow the NO branch with the assumption recorded.
                    assumptions_imply_false ->
                      do_check_emptiness(
                        n,
                        Enum.sort(Map.put(assumptions, var, false)),
                        new_context
                      )
                    # Truly independent: explore all three branches, recording
                    # the corresponding assumption on each path, and rebuild
                    # the node from the checked children.
                    true ->
                      s_y =
                        do_check_emptiness(
                          y,
                          Enum.sort(Map.put(assumptions, var, true)),
                          new_context
                        )
                      s_n =
                        do_check_emptiness(
                          n,
                          Enum.sort(Map.put(assumptions, var, false)),
                          new_context
                        )
                      s_d =
                        do_check_emptiness(
                          d,
                          Enum.sort(Map.put(assumptions, var, :dc)),
                          new_context
                        )
                      Store.find_or_create_node(var, s_y, s_n, s_d)
                  end
              end
          end
      end
    end
  end
end
# Handles recursive simplification by setting up a sub-problem.
#
# Gathers all assumptions related to a sub-component (identified by
# `sub_key`, e.g. :head, :tail, or {:elem, index}), unwraps them to their
# base form, reconstructs the sub-component's type spec from them, and
# decides this node's predicate with a subtype check against
# `constraint_spec`.
#
# NOTE: `@doc` is always discarded on private functions (with a compiler
# warning), so this is documented with plain comments instead.
defp handle_recursive_subproblem(
       algo_type,
       sub_key,
       # This is a TypeSpec embedded in the TDD variable, not an ID.
       constraint_spec,
       node_details,
       sorted_assumptions,
       context
     ) do
  {_var, y, n, _d} = node_details
  assumptions = Map.new(sorted_assumptions)

  # 1. Partition assumptions into those scoped to this sub-component and the
  #    rest. Enum.split_with/2 replaces the deprecated Enum.partition/2.
  {sub_assumptions_raw, other_assumptions_list} =
    Enum.split_with(assumptions, fn {v, _} ->
      (Tdd.Predicate.Info.get_traits(v) || %{})[:sub_key] == sub_key
    end)

  # 2. Strip the scoping wrapper from each relevant key (scoped 4-tuple ->
  #    inner base variable), then rebuild the sub-component's spec from the
  #    resulting base assumptions.
  sub_assumptions_map =
    Map.new(sub_assumptions_raw, fn {{_c, _p, inner_v, _n}, val} -> {inner_v, val} end)

  reconstructed_sub_spec = Tdd.TypeReconstructor.spec_from_assumptions(sub_assumptions_map)

  # 3. The node's predicate holds iff the reconstructed spec is a subtype of
  #    the constraint carried by the variable. `context` is threaded through
  #    so coinductive loop detection spans the nested check.
  is_sub = Tdd.Compiler.is_subtype_with_context(reconstructed_sub_spec, constraint_spec, context)

  # 4. Branch accordingly: YES keeps the full assumption list; NO continues
  #    with only the unrelated assumptions — NOTE(review): presumably because
  #    the sub-component assumptions are consumed once the predicate is
  #    decided false; confirm against do_simplify's conventions.
  if is_sub do
    case algo_type do
      :simplify -> do_simplify(y, sorted_assumptions, context)
      :check_emptiness -> do_check_emptiness(y, sorted_assumptions, context)
    end
  else
    case algo_type do
      :simplify -> do_simplify(n, Enum.sort(other_assumptions_list), context)
      :check_emptiness -> do_check_emptiness(n, Enum.sort(other_assumptions_list), context)
    end
  end
end
end
defmodule Tdd.TypeReconstructor do
@ -1906,6 +2176,7 @@ defmodule Tdd.Compiler do
alias Tdd.Variable
alias Tdd.Store
alias Tdd.Algo
alias Tdd.Debug
@doc """
The main public entry point. Takes a spec and returns its TDD ID.
@ -1977,7 +2248,8 @@ defmodule Tdd.Compiler do
Algo.simplify(final_id)
# Atomic types, literals, and compound types (union, intersect, etc.)
_ ->
other ->
Tdd.Debug.log(other, "compile_normalized_spec")
# Pass context for children's compilation
raw_id = do_structural_compile(normalized_spec, context)
Algo.simplify(raw_id)
@ -2043,10 +2315,25 @@ defmodule Tdd.Compiler do
Algo.negate(compile_normalized_spec(sub_spec, context))
{:cons, head_spec, tail_spec} ->
id_head = compile_normalized_spec(head_spec, context)
id_tail = compile_normalized_spec(tail_spec, context)
# context passed for sub_problem
compile_cons_from_ids(id_head, id_tail, context)
# The logic is now much more direct. We don't need a helper.
id_list = compile_normalized_spec(:list, context)
id_is_empty = create_base_type_tdd(Variable.v_list_is_empty())
id_not_is_empty = Algo.negate(id_is_empty)
non_empty_list_id =
Algo.apply(:intersect, &op_intersect_terminals/2, id_list, id_not_is_empty)
# Embed the SPECS directly into the variables.
head_checker_var = Variable.v_list_head_pred(head_spec)
head_checker_tdd = create_base_type_tdd(head_checker_var)
tail_checker_var = Variable.v_list_tail_pred(tail_spec)
tail_checker_tdd = create_base_type_tdd(tail_checker_var)
[non_empty_list_id, head_checker_tdd, tail_checker_tdd]
|> Enum.reduce(compile_normalized_spec(:any, context), fn id, acc ->
Algo.apply(:intersect, &op_intersect_terminals/2, id, acc)
end)
{:tuple, elements_specs} ->
# Renamed for clarity
@ -2127,9 +2414,8 @@ defmodule Tdd.Compiler do
end)
end
defp compile_tuple_from_elements(elements_specs, context) do
defp compile_tuple_from_elements(elements_specs, context) do
size = length(elements_specs)
# Ensures :tuple is properly compiled
base_id = compile_normalized_spec(:tuple, context)
size_tdd = create_base_type_tdd(Variable.v_tuple_size_eq(size))
initial_id = Algo.apply(:intersect, &op_intersect_terminals/2, base_id, size_tdd)
@ -2137,13 +2423,13 @@ defmodule Tdd.Compiler do
elements_specs
|> Enum.with_index()
|> Enum.reduce(initial_id, fn {elem_spec, index}, acc_id ->
# Compile the element's spec using the main path
elem_id = compile_normalized_spec(elem_spec, context)
elem_key_constructor = &Variable.v_tuple_elem_pred(index, &1)
elem_checker = sub_problem(elem_key_constructor, elem_id, context)
Algo.apply(:intersect, &op_intersect_terminals/2, acc_id, elem_checker)
# Embed the element's SPEC directly into the variable.
elem_checker_var = Variable.v_tuple_elem_pred(index, elem_spec)
elem_checker_tdd = create_base_type_tdd(elem_checker_var)
Algo.apply(:intersect, &op_intersect_terminals/2, acc_id, elem_checker_tdd)
end)
end
end
# sub_problem and do_sub_problem remain largely the same,
# but ensure `spec_to_id` called within `do_sub_problem` (for placeholder case)
@ -2248,17 +2534,83 @@ defmodule Tdd.Compiler do
# --- Public Subtyping Check (from CompilerAlgoTests) ---
@doc "Checks if spec1 is a subtype of spec2 using TDDs."
@spec is_subtype(TypeSpec.t(), TypeSpec.t()) :: boolean
@doc """
Internal subtype check that carries the compilation context for mu-variables.
"""
# def is_subtype_with_context(spec1, spec2, context) do
# # We don't need the full debug logging here, it's an internal call.
# id1 = compile_normalized_spec(spec1, context)
# id2 = compile_normalized_spec(spec2, context)
#
# neg_id2 = Algo.negate(id2)
# intersect_id = Algo.apply(:intersect, &op_intersect_terminals/2, id1, neg_id2)
# final_id = Algo.check_emptiness(intersect_id)
#
# final_id == Store.false_node_id()
# end
# Make the public function use this new internal one with an empty context.
# Public entry point: normalize both specs UP FRONT, then delegate to the
# context-carrying check with a fresh (empty) mu-variable context.
def is_subtype(spec1, spec2) do
  Debug.log({spec1, spec2}, "Compiler.is_subtype/2: INPUTS")

  spec1
  |> Tdd.TypeSpec.normalize()
  |> is_subtype_with_context(Tdd.TypeSpec.normalize(spec2), %{})
end
@doc """
Internal subtype check that carries the compilation context for mu-variables.

Uses the identity `spec1 <: spec2  <=>  spec1 ∩ ¬spec2 = ∅`: compile both
sides, intersect the first with the negation of the second, and test the
result for coinductive emptiness.
"""
def is_subtype_with_context(spec1, spec2, context) do
  difference =
    Algo.apply(
      :intersect,
      &op_intersect_terminals/2,
      compile_normalized_spec(spec1, context),
      Algo.negate(compile_normalized_spec(spec2, context))
    )

  Algo.check_emptiness(difference) == Store.false_node_id()
end
def is_subtype(spec1, spec2) do
id1 = spec_to_id(spec1)
id2 = spec_to_id(spec2)
# A <: B <=> A & ~B == none
neg_id2 = Algo.negate(id2)
intersect_id = Algo.apply(:intersect, &op_intersect_terminals/2, id1, neg_id2)
# Simplify is crucial for coinductive reasoning with recursive types
final_id = Algo.simplify(intersect_id)
final_id == Store.false_node_id()
Debug.log({spec1, spec2}, "Compiler.is_subtype/2: INPUTS")
id1 =
spec_to_id(spec1)
|> Debug.log("Compiler.is_subtype/2: id1 (for spec1)")
id2 =
spec_to_id(spec2)
|> Debug.log("Compiler.is_subtype/2: id2 (for spec2)")
neg_id2 =
Algo.negate(id2)
|> Debug.log("Compiler.is_subtype/2: neg_id2")
intersect_id =
Algo.apply(:intersect, &op_intersect_terminals/2, id1, neg_id2)
|> Debug.log("Compiler.is_subtype/2: intersect_id (id1 & ~id2)")
final_id =
Algo.check_emptiness(intersect_id)
|> Debug.log("Compiler.is_subtype/2: final_id (after check_emptiness)")
result =
final_id ==
Store.false_node_id()
|> Debug.log("Compiler.is_subtype/2: FINAL RESULT (is final_id == 0?)")
result
end
# @spec is_subtype(TypeSpec.t(), TypeSpec.t()) :: boolean
# def is_subtype(spec1, spec2) do
# id1 = spec_to_id(spec1)
# id2 = spec_to_id(spec2)
# # A <: B <=> A & ~B == none
# neg_id2 = Algo.negate(id2)
# intersect_id = Algo.apply(:intersect, &op_intersect_terminals/2, id1, neg_id2)
# # Simplify is crucial for coinductive reasoning with recursive types
# final_id = Algo.simplify(intersect_id)
# final_id == Store.false_node_id()
# end
end
####