kinda works
commit 8a6f84238e (parent bb7187b0c7)

new.exs | 236
@@ -7,7 +7,7 @@ defmodule Tdd.Debug do
   # --- Agent for Tracing State ---
   @agent_name Tdd.Debug.StateAgent

-  defp init_agent_if_needed do
+  def init_agent_if_needed do
     case Process.whereis(@agent_name) do
       nil -> Agent.start_link(fn -> MapSet.new() end, name: @agent_name)
       _pid -> :ok
@@ -15,19 +15,19 @@ defmodule Tdd.Debug do
     :ok
   end

-  defp add_traced_pid(pid) when is_pid(pid) do
+  def add_traced_pid(pid) when is_pid(pid) do
     init_agent_if_needed()
     Agent.update(@agent_name, &MapSet.put(&1, pid))
   end

-  defp remove_traced_pid(pid) when is_pid(pid) do
+  def remove_traced_pid(pid) when is_pid(pid) do
     case Process.whereis(@agent_name) do
       nil -> :ok
       agent_pid -> Agent.cast(agent_pid, fn state -> MapSet.delete(state, pid) end)
     end
   end

-  defp is_pid_traced?(pid) when is_pid(pid) do
+  def is_pid_traced?(pid) when is_pid(pid) do
     case Process.whereis(@agent_name) do
       nil ->
         false
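Note on the defp -> def flips above: the code this module injects into other modules calls these helpers with their fully qualified names (for example Tdd.Debug.is_pid_traced?(self()) further down), so they presumably have to be public. The registry itself is just a named Agent holding a MapSet of traced PIDs. A minimal standalone sketch of the same pattern, with illustrative names only (TraceRegistrySketch is not part of this codebase):

defmodule TraceRegistrySketch do
  # Illustrative stand-in for Tdd.Debug's agent-backed PID registry.
  @agent __MODULE__

  def ensure_started do
    case Process.whereis(@agent) do
      nil -> Agent.start_link(fn -> MapSet.new() end, name: @agent)
      _pid -> :ok
    end

    :ok
  end

  def add(pid) when is_pid(pid) do
    ensure_started()
    Agent.update(@agent, &MapSet.put(&1, pid))
  end

  def traced?(pid) when is_pid(pid) do
    case Process.whereis(@agent) do
      nil -> false
      agent -> Agent.get(agent, &MapSet.member?(&1, pid))
    end
  end
end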
@@ -47,7 +47,7 @@ defmodule Tdd.Debug do
     add_traced_pid(pid_to_trace)
     ref = Process.monitor(pid_to_trace)

-    Process.spawn_link(fn ->
+    Process.spawn(fn ->
       receive do
         {:DOWN, ^ref, :process, ^pid_to_trace, _reason} ->
           remove_traced_pid(pid_to_trace)
@@ -55,7 +55,7 @@ defmodule Tdd.Debug do
         3_600_000 -> # 1 hour safety timeout
           remove_traced_pid(pid_to_trace)
       end
-    end)
+    end, [:monitor])
     :ok
   end

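The watcher spawned here is what eventually removes a dead PID from the registry. Switching from Process.spawn_link/1 to Process.spawn/2 with [:monitor] means a crash in the watcher no longer takes the tracing caller down; the caller just receives a :DOWN message for it. One thing worth double-checking in the version above: Process.monitor/1 runs in the caller, so the :DOWN for the traced pid appears to land in the caller's mailbox rather than the spawned watcher's, leaving the watcher to the 1-hour timeout. A hedged sketch of the same cleanup idea with the monitor set up inside the watcher (names are illustrative; cleanup/1 stands in for remove_traced_pid/1):

watch = fn pid_to_trace, cleanup ->
  spawn(fn ->
    # Monitoring from inside the watcher so the :DOWN message is delivered here.
    ref = Process.monitor(pid_to_trace)

    receive do
      {:DOWN, ^ref, :process, ^pid_to_trace, _reason} -> cleanup.(pid_to_trace)
    after
      # 1 hour safety timeout, mirroring the diff above
      3_600_000 -> cleanup.(pid_to_trace)
    end
  end)

  :ok
end

# Example: watch.(some_pid, &IO.inspect({:untraced, &1}))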
@@ -77,20 +77,18 @@ defmodule Tdd.Debug do

   # --- Process Dictionary for Call Depth ---
   defp get_depth, do: Process.get(:tdd_debug_depth, 0)
-  defp increment_depth do
+  def increment_depth do
     new_depth = get_depth() + 1
     Process.put(:tdd_debug_depth, new_depth)
     new_depth
   end
-  defp decrement_depth do
+  def decrement_depth do
     new_depth = max(0, get_depth() - 1)
     Process.put(:tdd_debug_depth, new_depth)
     new_depth
   end

   # --- Core Macro Logic ---
-  @inspect_limit 100 # Default limit for inspect calls by this module
-
   defmacro __using__(_opts) do
     quote do
       import Kernel, except: [def: 1, def: 2, defp: 1, defp: 2]
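increment_depth/0 and decrement_depth/0 go public for the same reason as the registry helpers: the generated function bodies call them as Tdd.Debug.increment_depth/0. The depth lives in the process dictionary, so nesting is tracked per process with no shared state. A tiny illustrative sketch of how the indentation falls out of it (:demo_depth is a stand-in key, not the module's :tdd_debug_depth):

# Runnable sketch of the per-process depth counter and its indentation.
bump = fn -> Process.put(:demo_depth, Process.get(:demo_depth, 0) + 1) end
indent = fn -> String.duplicate("  ", max(Process.get(:demo_depth, 0) - 1, 0)) end

bump.()
IO.puts(indent.() <> "CALL: depth 1")  # no indent at depth 1
bump.()
IO.puts(indent.() <> "CALL: depth 2")  # indented by two spaces at depth 2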
@@ -108,78 +106,132 @@ defmodule Tdd.Debug do
   defmacro defp(call, clauses \\ Keyword.new()) do
     generate_traced_function(:defp, call, clauses, __CALLER__)
   end

+  defp is_simple_variable_ast?(ast_node) do
+    case ast_node do
+      {var_name, _meta, _context} when is_atom(var_name) ->
+        var_name != :_
+      _ -> false
+    end
+  end
+
   defp generate_traced_function(type, call_ast, clauses, caller_env) do
-    {function_name_ast, meta_call, original_args_patterns_ast} = call_ast
-    original_args_patterns_ast_list = original_args_patterns_ast || []
+    require Macro # Good practice
+
+    {function_name_ast, meta_call, original_args_patterns_ast_nullable} = call_ast
+    original_args_patterns_ast_list = original_args_patterns_ast_nullable || []

     original_body_ast =
       (if Keyword.keyword?(clauses) do
         Keyword.get(clauses, :do, clauses)
       else
-        clauses # Body is directly provided
-      end) || quote(do: nil) # Default to `do: nil` if no body
+        clauses
+      end) || quote(do: nil)

-    # Transform arguments: `pattern` becomes `__td_arg_N__ = pattern`
-    # And collect the `__td_arg_N__` variables for logging.
-
-    # Step 1: Map original patterns to a list of {new_pattern_ast, generated_var_ast} tuples
-    # Enum.with_index provides the index for unique variable naming.
     mapped_and_generated_vars_tuples =
-      Enum.map(Enum.with_index(original_args_patterns_ast_list), fn {pattern_ast, index} ->
-        # Create a unique, hygienic variable name like __td_arg_0__
-        # Using caller_env.module for context makes the variable hygienic to the calling module.
-        generated_var_name = String.to_atom("__td_arg_#{index}__")
-        generated_var_ast = Macro.var(generated_var_name, caller_env.module)
-
-        # This AST represents: __td_arg_N__ = original_pattern_N
-        new_pattern_ast = quote do
-          unquote(generated_var_ast) = unquote(pattern_ast)
-        end
-
-        {new_pattern_ast, generated_var_ast}
+      Enum.map(Enum.with_index(original_args_patterns_ast_list), fn {original_pattern_ast, index} ->
+        # __td_arg_N__ is for logging, make it hygienic with `nil` context (or __MODULE__)
+        td_arg_var = Macro.var(String.to_atom("__td_arg_#{index}__"), nil)
+
+        {final_pattern_for_head, rhs_for_td_arg_assignment} =
+          case original_pattern_ast do
+            # 1. Ignored variable: `_`
+            # AST: {:_, meta, context_module_or_nil}
+            {:_, _, _} = underscore_ast ->
+              {underscore_ast, quote(do: :__td_ignored_argument__)}
+
+            # 2. Assignment pattern: `var = pattern` or `var = _`
+            # AST: {:=, meta, [lhs, rhs_of_assign]}
+            {:=, _meta_assign, [lhs_of_assign, _rhs_of_assign]} = assignment_pattern_ast ->
+              if is_simple_variable_ast?(lhs_of_assign) do
+                {assignment_pattern_ast, lhs_of_assign} # Head uses `var = pattern`, log `var`
+              else
+                # LHS is complex (e.g., `%{key: v} = pattern`), capture the whole value.
+                captured_val_var = Macro.unique_var(String.to_atom("tdc_assign_#{index}"), Elixir)
+                new_head_pattern = quote do unquote(captured_val_var) = unquote(assignment_pattern_ast) end
+                {new_head_pattern, captured_val_var}
+              end
+
+            # 3. Default argument: `pattern_before_default \\ default_value`
+            # AST: {:\\, meta, [pattern_before_default, default_value_ast]}
+            {:\\, _meta_default, [pattern_before_default, _default_value_ast]} = default_arg_pattern_ast ->
+              cond do
+                # 3a. `var \\ default`
+                is_simple_variable_ast?(pattern_before_default) ->
+                  {default_arg_pattern_ast, pattern_before_default}
+
+                # 3b. `(var = inner_pattern) \\ default`
+                match?({:=, _, [lhs_inner_assign, _]}, pattern_before_default) and
+                    is_simple_variable_ast?(pattern_before_default |> elem(2) |> Enum.at(0)) ->
+                  {:=, _, [lhs_inner_assign, _]} = pattern_before_default
+                  # `lhs_inner_assign` is the var on the left of `=`
+                  {default_arg_pattern_ast, lhs_inner_assign}
+
+                # 3c. `(complex_pattern) \\ default` or `(_ = inner_pattern) \\ default` etc.
+                true ->
+                  captured_val_var = Macro.unique_var(String.to_atom("tdc_def_#{index}"), Elixir)
+                  new_head_pattern = quote do unquote(captured_val_var) = unquote(default_arg_pattern_ast) end
+                  {new_head_pattern, captured_val_var}
+              end
+
+            # 4. Simple variable `var` (checked using our helper)
+            # or other complex patterns/literals not caught above.
+            ast_node ->
+              if is_simple_variable_ast?(ast_node) do
+                {ast_node, ast_node} # Head uses `var`, log `var`
+              else
+                # It's a complex pattern (e.g., `%{a: x}`, `[h | t]`) or a literal not assignable to.
+                captured_val_var = Macro.unique_var(String.to_atom("tdc_pat_#{index}"), Elixir)
+                new_head_pattern = quote do unquote(captured_val_var) = unquote(ast_node) end
+                {new_head_pattern, captured_val_var}
+              end
+          end
+
+        assignment_ast = quote do unquote(td_arg_var) = unquote(rhs_for_td_arg_assignment) end
+        {final_pattern_for_head, assignment_ast, td_arg_var}
       end)

-    # Step 2: Unzip the list of tuples into two separate lists
-    {new_args_patterns_ast_list, generated_arg_vars_asts} =
-      Enum.unzip(mapped_and_generated_vars_tuples)
-
-    # Reconstruct the call_ast with the new argument patterns
-    # new_args_patterns_ast_list now contains ASTs like `[__td_arg_0__ = pattern0, __td_arg_1__ = pattern1, ...]`
-    new_call_ast = {function_name_ast, meta_call, new_args_patterns_ast_list}
+    {new_args_patterns_for_head_list, assignments_for_logging_vars_ast_list, generated_vars_to_log_asts} =
+      if mapped_and_generated_vars_tuples == [],
+        do: {[], [], []},
+        else: mapped_and_generated_vars_tuples |> Enum.map(&Tuple.to_list(&1)) |> Enum.zip() |> Enum.map(&Tuple.to_list(&1))
+              |> then(fn [a, b, c] -> {a, b, c} end)
+    # Enum.unzip(mapped_and_generated_vars_tuples)
+
+    new_call_ast = {function_name_ast, meta_call, new_args_patterns_for_head_list}

     traced_body_inner_ast =
       quote do
+        unquote_splicing(assignments_for_logging_vars_ast_list)
+
         if Tdd.Debug.is_pid_traced?(self()) do
           current_print_depth = Tdd.Debug.increment_depth()
           indent = String.duplicate("  ", current_print_depth - 1)
-          # runtime_arg_values will be a list of the actual values bound to __td_arg_0__, __td_arg_1__, etc.
-          # generated_arg_vars_asts is `[__td_arg_0_ast, __td_arg_1_ast, ...]`
-          runtime_arg_values = [unquote_splicing(generated_arg_vars_asts)]
-
-          caller_module_name_str = Module.split(__MODULE__) |> Enum.join(".")
+          runtime_arg_values = [unquote_splicing(generated_vars_to_log_asts)]
+
+          actual_module_name_str = Atom.to_string(unquote(caller_env.module))
+
+          # The function_name_ast is resolved at macro expansion time.
+          # If it's `def foo(...)`, `unquote(function_name_ast)` becomes `:foo`.
+          # If `def unquote(name_var)(...)`, it resolves `name_var`.
+          resolved_fn_name = unquote(function_name_ast)
           printable_function_name_str =
-            case unquote(function_name_ast) do
-              fn_name_atom when is_atom(fn_name_atom) -> Atom.to_string(fn_name_atom)
-              complex_fn_ast -> Macro.to_string(complex_fn_ast)
+            if is_atom(resolved_fn_name) do
+              Atom.to_string(resolved_fn_name)
+            else
+              Macro.to_string(resolved_fn_name) # For complex names / operators if AST passed
             end

           IO.puts(
-            "#{indent}CALL: #{caller_module_name_str}.#{printable_function_name_str}"
+            "#{indent}CALL: #{actual_module_name_str}.#{printable_function_name_str}"
           )
           IO.puts(
             "#{indent}  ARGS: #{inspect(runtime_arg_values)}"
           )

           try do
-            # The original_body_ast will execute in a context where __td_arg_N__ are bound
-            # to the values of the original patterns.
-            result = unquote(Macro.escape(original_body_ast, unquote: true))
+            result = unquote(original_body_ast)

             _ = Tdd.Debug.decrement_depth()
             IO.puts(
-              "#{indent}RETURN from #{caller_module_name_str}.#{printable_function_name_str}: #{inspect(result)}"
+              "#{indent}RETURN from #{actual_module_name_str}.#{printable_function_name_str}: #{inspect(result)}"
            )
             result
           rescue
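The net effect of the case clauses above is that every argument in a traced function head stays pattern-compatible while also binding something the logger can print: simple variables and `var = pattern` / `var \\ default` forms are logged through the existing variable, `_` is logged as :__td_ignored_argument__, and anything complex gets wrapped as `tdc_*_N = pattern` so the whole matched value is captured. A small runnable check of the classification idea; note the anonymous function below adds an is_atom(ctx) guard that the diff's is_simple_variable_ast?/1 does not have, which may matter for call-shaped nodes such as %{} whose third AST element is an argument list rather than a context atom:

# Sketch of the variable-vs-pattern test on quoted argument ASTs.
simple_var? = fn
  {name, _meta, ctx} when is_atom(name) and is_atom(ctx) -> name != :_
  _ -> false
end

IO.inspect(simple_var?.(quote(do: opts)))        # true  -> logged directly (cases 3a/4)
IO.inspect(simple_var?.(quote(do: _)))           # false -> logged as :__td_ignored_argument__ (case 1)
IO.inspect(simple_var?.(quote(do: %{user: u})))  # false -> whole value captured via tdc_pat_N (case 4)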
@@ -188,28 +240,19 @@ defmodule Tdd.Debug do
             stacktrace = __STACKTRACE__
             _ = Tdd.Debug.decrement_depth()
             IO.puts(
-              "#{indent}ERROR in #{caller_module_name_str}.#{printable_function_name_str}: #{inspect(error_instance)}"
+              "#{indent}ERROR in #{actual_module_name_str}.#{printable_function_name_str}: #{inspect(error_instance)}"
             )
             reraise error_instance, stacktrace
           end
         else
-          # If not traced, execute the original body. Note: this branch will *not* have
-          # the __td_arg_N__ variables bound. The `new_call_ast` with these assignments
-          # is only used if we go into the traced path. This is a subtle point.
-          # To ensure the __td_arg_N__ = pattern bindings always happen,
-          # the final_definition_ast should *always* use new_call_ast.
-          # The `if` condition should only gate the logging.
-          # Let's adjust this: the bindings MUST happen for the body to work with the new var names if it were changed.
-          # However, the original_body_ast uses the original pattern variable names.
-          # So, the original_body_ast is fine. The `new_call_ast` is what defines the function signature.
-          unquote(Macro.escape(original_body_ast, unquote: true))
+          unquote(original_body_ast)
         end
       end

     final_definition_ast =
       quote location: :keep do
         Kernel.unquote(type)(
-          unquote(new_call_ast), # Use the call_ast with instrumented args: `def my_fun(__td_arg_0__ = pattern0, ...)`
+          unquote(new_call_ast),
           do: unquote(traced_body_inner_ast)
         )
       end
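Put together, `use Tdd.Debug` swaps def/defp in the using module for these wrappers, so every definition logs CALL/ARGS/RETURN (or ERROR) lines whenever its process is in the traced set. A hypothetical usage and the rough shape of output it aims at; the module and values below are illustrative, and only the message format comes from the code above. Note that the new Atom.to_string(caller_env.module) keeps the Elixir. prefix that the replaced Module.split/Enum.join version stripped:

defmodule TracedDemo do
  use Tdd.Debug

  def outer(x), do: inner(x) + 1
  defp inner(x), do: x * 2
end

# With tracing enabled for the current process (the tests below use
# Tdd.Debug.enable_tracing/0), TracedDemo.outer(3) is intended to print
# roughly:
#
#   CALL: Elixir.TracedDemo.outer
#     ARGS: [3]
#     CALL: Elixir.TracedDemo.inner
#       ARGS: [3]
#     RETURN from Elixir.TracedDemo.inner: 6
#   RETURN from Elixir.TracedDemo.outer: 7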
@@ -597,6 +640,7 @@ defmodule Tdd.TypeSpec do
 end

 defmodule Tdd.Store do
+  use Tdd.Debug
   @moduledoc """
   Manages the state of the TDD system's node graph and operation cache.

@@ -780,6 +824,7 @@ defmodule Tdd.Store do
 end

 defmodule Tdd.Variable do
+  use Tdd.Debug
   @moduledoc """
   Defines the canonical structure for all Tdd predicate variables.

@@ -971,6 +1016,7 @@ end

 # in a new file, e.g., lib/tdd/consistency/engine.ex
 defmodule Tdd.Consistency.Engine do
+  use Tdd.Debug
   @moduledoc """
   A rule-based engine for checking the semantic consistency of a set of assumptions.

@@ -1213,6 +1259,7 @@ end

 defmodule Tdd.Algo do
   @moduledoc "Implements the core, stateless algorithms for TDD manipulation."
+  use Tdd.Debug
   alias Tdd.Store
   alias Tdd.Consistency.Engine

@@ -1301,7 +1348,6 @@ defmodule Tdd.Algo do
   @spec negate(non_neg_integer) :: non_neg_integer
   def negate(tdd_id) do
     cache_key = {:negate, tdd_id}
-    IO.inspect(tdd_id)
     case Store.get_op_cache(cache_key) do
       {:ok, result_id} ->
         result_id
@@ -1573,6 +1619,7 @@ defmodule Tdd.TypeReconstructor do
   set of predicate assumptions (e.g., from a path in a TDD) and synthesizes
   the most specific `TypeSpec` that satisfies all of those assumptions.
   """
+  use Tdd.Debug
   alias Tdd.TypeSpec
   alias Tdd.Predicate.Info
   alias Tdd.Variable
@@ -2033,7 +2080,25 @@ defmodule Tdd.Compiler do
       loop_until_stable(next_id, step_function, iteration + 1)
     end
   end

+  def is_subtype(spec1, spec2) do
+    id1 = spec_to_id(spec1)
+    id2 = spec_to_id(spec2)
+    # The subtyping check is: `A <: B` if and only if `A & ~B` is empty (`:none`).
+    neg_id2 = Algo.negate(id2)
+
+    op_intersect = fn
+      :false_terminal, _ -> :false_terminal
+      _, :false_terminal -> :false_terminal
+      t, :true_terminal -> t
+      :true_terminal, t -> t
+      # Default case for non-terminal nodes, though apply handles recursion
+      _t1, _t2 -> :non_terminal
+    end
+
+    intersect_id = Algo.apply(:intersect, op_intersect, id1, neg_id2)
+    final_id = Algo.simplify(intersect_id)
+    final_id == Store.false_node_id()
+  end
   # --- Private Functions for Terminal Logic ---
   defp op_union_terminals(:true_terminal, _), do: :true_terminal
   defp op_union_terminals(_, :true_terminal), do: :true_terminal
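The new Compiler.is_subtype/2 makes the set-theoretic check (A <: B iff A ∧ ¬B is empty, i.e. reduces to the false terminal) part of the compiler itself instead of a test-only helper. A usage sketch, reusing specs that already appear in the tests below; expected values mirror those test expectations, and the full module name Tdd.Compiler is taken from the hunk header:

Tdd.Compiler.is_subtype({:literal, :foo}, :atom)    # expected: true   (:foo <: atom)
Tdd.Compiler.is_subtype(:atom, {:literal, :foo})    # expected: false  (atom is not <: :foo)
Tdd.Compiler.is_subtype({:literal, 5}, :integer)    # expected: true   (int==5 <: integer)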
@@ -2925,25 +2990,25 @@ defmodule CompilerAlgoTests do
   alias Tdd.Algo

   # High-level helpers that mimic the final API
-  defp is_subtype(spec1, spec2) do
-    id1 = Compiler.spec_to_id(spec1)
-    id2 = Compiler.spec_to_id(spec2)
-    # The subtyping check is: `A <: B` if and only if `A & ~B` is empty (`:none`).
-    neg_id2 = Algo.negate(id2)
-
-    op_intersect = fn
-      :false_terminal, _ -> :false_terminal
-      _, :false_terminal -> :false_terminal
-      t, :true_terminal -> t
-      :true_terminal, t -> t
-      # Default case for non-terminal nodes, though apply handles recursion
-      _t1, _t2 -> :non_terminal
-    end
-
-    intersect_id = Algo.apply(:intersect, op_intersect, id1, neg_id2)
-    final_id = Algo.simplify(intersect_id)
-    final_id == Store.false_node_id()
-  end
+  # defp is_subtype(spec1, spec2) do
+  #   id1 = Compiler.spec_to_id(spec1)
+  #   id2 = Compiler.spec_to_id(spec2)
+  #   # The subtyping check is: `A <: B` if and only if `A & ~B` is empty (`:none`).
+  #   neg_id2 = Algo.negate(id2)
+  #
+  #   op_intersect = fn
+  #     :false_terminal, _ -> :false_terminal
+  #     _, :false_terminal -> :false_terminal
+  #     t, :true_terminal -> t
+  #     :true_terminal, t -> t
+  #     # Default case for non-terminal nodes, though apply handles recursion
+  #     _t1, _t2 -> :non_terminal
+  #   end
+  #
+  #   intersect_id = Algo.apply(:intersect, op_intersect, id1, neg_id2)
+  #   final_id = Algo.simplify(intersect_id)
+  #   final_id == Store.false_node_id()
+  # end

   defp are_equivalent(spec1, spec2) do
     Compiler.spec_to_id(spec1) == Compiler.spec_to_id(spec2)
@@ -2953,7 +3018,7 @@ defmodule CompilerAlgoTests do
     Compiler.spec_to_id(spec) == Store.false_node_id()
   end

-  defp test_subtype(name, expected, s1, s2), do: test(name, expected, is_subtype(s1, s2))
+  defp test_subtype(name, expected, s1, s2), do: test(name, expected, Compiler.is_subtype(s1, s2))
   defp test_equiv(name, expected, s1, s2), do: test(name, expected, are_equivalent(s1, s2))
   defp test_contradiction(name, expected \\ true), do: &test(name, expected, is_contradiction(&1))

@@ -2987,6 +3052,7 @@ defmodule CompilerAlgoTests do
     IO.puts("\n--- Section: Basic Subtyping ---")
     Tdd.Debug.enable_tracing()
     test_subtype(":foo <: atom", true, {:literal, :foo}, :atom)
+    Tdd.Debug.disable_tracing()
     test_subtype("atom <: :foo", false, :atom, {:literal, :foo})
     test_subtype(":foo <: integer", false, {:literal, :foo}, :integer)
     test_subtype("int==5 <: integer", true, {:literal, 5}, :integer)
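Tracing is switched on for just the first subtype test and off again right after, which keeps the trace output focused but leaves tracing enabled if that test raises. A hedged convenience sketch that scopes tracing with try/after; with_tracing is illustrative, not part of Tdd.Debug (the recursive tests further down use Tdd.Debug.run/1 for a similar effect):

with_tracing = fn fun ->
  Tdd.Debug.enable_tracing()

  try do
    fun.()
  after
    # Always turn tracing back off, even if the traced block raises.
    Tdd.Debug.disable_tracing()
  end
end

with_tracing.(fn ->
  Tdd.Compiler.is_subtype({:literal, :foo}, :atom)
end)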
@@ -3241,6 +3307,7 @@ defmodule TddCompilerRecursiveTests do
     )

     list_with_atom = {:cons, :atom, {:literal, []}}
+    Tdd.Debug.run(fn ->

     test_subtype(
       "a list with a correct element type is a subtype of list_of(E)",
@@ -3248,6 +3315,7 @@ defmodule TddCompilerRecursiveTests do
       list_with_atom,
       list_of_atoms
     )
+    end)

     # --- Equivalence tests ---
     IO.puts("\n--- Section: Equivalence ---")
@@ -3305,7 +3373,7 @@ defmodule TddCompilerRecursiveTests do
   end

   defp test_subtype(name, expected, spec1, spec2) do
-    result = do_is_subtype(spec1, spec2)
+    result = Compiler.is_subtype(spec1, spec2)
     test(name, expected, result)
   end

@@ -3330,7 +3398,7 @@ defmodule TddCompilerRecursiveTests do
 end

 # Ensure the tracing state manager is started
-Tdd.Debug.init()
+# Tdd.Debug.init()
 Process.sleep(100)
 # To run this new test, add the following to your main test runner script:
 # TddCompilerRecursiveTests.run()