switch to mix project for better tests
This commit is contained in:
parent
976f8250e3
commit
c2c7438d32
688
lib/debug.ex
Normal file
688
lib/debug.ex
Normal file
@@ -0,0 +1,688 @@
|
|||||||
|
defmodule Tdd.Debug.TracerData do
  @moduledoc """
  Provides functions to sanitize Elixir terms for tracing and JSON serialization.
  """

  # Maximum nesting depth before collections are replaced by "...trimmed..." markers.
  @max_sanitize_depth 5

  @doc "Sanitizes every element of a list (e.g. a traced function's argument list)."
  def sanitize_values(list) when is_list(list) do
    Enum.map(list, &sanitize_value/1)
  end

  @doc "Sanitizes a single term, trimming nested collections below the depth limit."
  def sanitize_value(val) do
    do_sanitize(val, @max_sanitize_depth)
  end

  # Scalars pass through untouched. Booleans and nil are atoms, so this clause
  # also covers them (the previously separate `nil` and `is_boolean` handling
  # was redundant/unreachable).
  defp do_sanitize(val, _depth_limit)
       when is_integer(val) or is_float(val) or is_atom(val),
       do: val

  # Assume strings are fine
  defp do_sanitize(val, _depth_limit) when is_binary(val), do: val

  # Runtime-only identifiers have no JSON representation; keep their textual form.
  defp do_sanitize(val, _depth_limit) when is_pid(val), do: inspect(val)
  defp do_sanitize(val, _depth_limit) when is_port(val), do: inspect(val)
  defp do_sanitize(val, _depth_limit) when is_reference(val), do: inspect(val)

  defp do_sanitize(val, _depth_limit) when is_function(val) do
    # Function.info/2 can fail for some native/opaque funs; degrade gracefully.
    try do
      arity = val |> Function.info(:arity) |> elem(1)

      case Function.info(val, :name) do
        {:name, name} when is_atom(name) and name != :"" ->
          "<Function #{Atom.to_string(name)}/#{arity}>"

        _ ->
          "<Function/#{arity}>"
      end
    catch
      _, _ -> "<Function>"
    end
  end

  defp do_sanitize(val, depth_limit) when is_list(val) do
    if depth_limit <= 0 do
      "[...trimmed_list...]"
    else
      Enum.map(val, &do_sanitize(&1, depth_limit - 1))
    end
  end

  defp do_sanitize(val, depth_limit) when is_map(val) do
    cond do
      # BUGFIX: all structs are maps, so Calendar structs previously hit this
      # clause and were decomposed field-by-field — the Calendar handling in
      # the final fallback clause was unreachable. Serialize them compactly
      # here instead, as originally intended.
      is_struct(val, DateTime) or is_struct(val, Date) or is_struct(val, Time) or
          is_struct(val, NaiveDateTime) ->
        inspect(val)

      depth_limit <= 0 ->
        "%{...trimmed_map...}"

      is_struct(val) ->
        module = val.__struct__
        # Sanitize only exposed fields, not internal :__meta__ or the :__struct__ key itself
        data_to_sanitize = val |> Map.delete(:__struct__) |> Map.delete(:__meta__)

        sanitized_fields =
          Map.new(data_to_sanitize, fn {k, v} -> {k, do_sanitize(v, depth_limit - 1)} end)

        %{type: :struct, module: module, fields: sanitized_fields}

      true ->
        # Regular map: sanitize both keys and values.
        Map.new(val, fn {k, v} ->
          {do_sanitize(k, depth_limit - 1), do_sanitize(v, depth_limit - 1)}
        end)
    end
  end

  defp do_sanitize(val, depth_limit) when is_tuple(val) do
    if depth_limit <= 0 do
      "{...trimmed_tuple...}"
    else
      val |> Tuple.to_list() |> Enum.map(&do_sanitize(&1, depth_limit - 1)) |> List.to_tuple()
    end
  end

  # Fallback for any remaining type. In practice only non-binary bitstrings
  # reach this clause, since every other primitive has a dedicated clause above.
  defp do_sanitize(val, _depth_limit), do: inspect(val)

  @doc """
  Builds a JSON-friendly map describing a raised exception and its stacktrace.

  Only the first 15 stack frames are kept to keep the payload manageable.
  """
  def sanitize_error(exception_instance, stacktrace) do
    %{
      type: :error,
      class: Atom.to_string(exception_instance.__struct__),
      message: Exception.message(exception_instance),
      # Take first, then format — avoids formatting frames that are discarded.
      stacktrace: stacktrace |> Enum.take(15) |> Enum.map(&Exception.format_stacktrace_entry/1)
    }
  end
end
|
||||||
|
|
||||||
|
defmodule Tdd.Debug do
|
||||||
|
@moduledoc """
|
||||||
|
Provides macros to wrap `def` and `defp` for function call/return tracing.
|
||||||
|
Logs arguments and return values using `IO.inspect`.
|
||||||
|
Builds an in-memory call tree data structure per traced process.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# --- Agent for Tracing State ---
|
||||||
|
@agent_name Tdd.Debug.StateAgent
|
||||||
|
|
||||||
|
# Starts the named tracing-state agent when it is not already running.
# The agent holds a MapSet of the pids currently being traced. Always :ok.
def init_agent_if_needed do
  if Process.whereis(@agent_name) == nil do
    # Start result deliberately ignored: a racing start simply returns
    # {:error, {:already_started, _}}, which is fine for our purposes.
    Agent.start_link(fn -> MapSet.new() end, name: @agent_name)
  end

  :ok
end
|
||||||
|
|
||||||
|
# Registers `pid` in the shared set of traced processes, booting the
# state agent first if it is not yet running.
def add_traced_pid(pid) when is_pid(pid) do
  init_agent_if_needed()
  Agent.update(@agent_name, fn traced -> MapSet.put(traced, pid) end)
end
|
||||||
|
|
||||||
|
# Unregisters `pid` from the traced set. A missing agent is a no-op, and the
# async cast means a concurrently dying agent cannot crash the caller.
def remove_traced_pid(pid) when is_pid(pid) do
  with agent_pid when is_pid(agent_pid) <- Process.whereis(@agent_name) do
    Agent.cast(agent_pid, &MapSet.delete(&1, pid))
  else
    nil -> :ok
  end
end
|
||||||
|
|
||||||
|
@doc """
Returns `true` when `pid` is registered for tracing.

Returns `false` when the state agent is not running or dies mid-call.
"""
def is_pid_traced?(pid) when is_pid(pid) do
  case Process.whereis(@agent_name) do
    nil ->
      false

    agent_pid ->
      try do
        Agent.get(agent_pid, &MapSet.member?(&1, pid), :infinity)
      rescue
        # e.g. the name got re-registered to something that is not an agent
        _e in ArgumentError -> false
      catch
        # BUGFIX: a dead agent makes Agent.get *exit* — `rescue` cannot
        # intercept exits, and the previous `rescue _e in [Exit, ...]`
        # referenced `Exit`, which is not an exception module at all.
        :exit, _reason -> false
      end
  end
end
|
||||||
|
|
||||||
|
# --- Tracing Data Storage ---
|
||||||
|
@doc """
Initializes the tracing data structures in the process dictionary.
"""
def init_trace_storage do
  # :tdd_debug_call_stack    – nodes for calls currently executing (innermost first)
  # :tdd_debug_session_roots – finished top-level calls, newest first
  for key <- [:tdd_debug_call_stack, :tdd_debug_session_roots] do
    Process.put(key, [])
  end

  :ok
end
|
||||||
|
|
||||||
|
@doc """
Retrieves the collected trace data (a list of root call tree nodes)
for the current process and clears it from the process dictionary.
Children within each node and the root list itself are reversed to be in chronological order.
"""
def get_and_clear_trace_data do
  roots = Process.get(:tdd_debug_session_roots, [])

  # Both roots and children were accumulated by prepending, so restore
  # chronological order before handing the data out.
  chronological =
    roots
    |> Enum.map(&reverse_children_recursively/1)
    |> Enum.reverse()

  Process.delete(:tdd_debug_session_roots)
  Process.delete(:tdd_debug_call_stack)
  chronological
end
|
||||||
|
|
||||||
|
# Recursively restores chronological ordering of each node's `children`
# list, which was built newest-first during tracing.
defp reverse_children_recursively(node) do
  case node.children do
    kids when kids in [nil, false, []] ->
      node

    kids ->
      ordered =
        kids
        |> Enum.map(&reverse_children_recursively/1)
        |> Enum.reverse()

      %{node | children: ordered}
  end
end
|
||||||
|
|
||||||
|
# --- Tracing Control Functions ---
|
||||||
|
@doc "Enables function call tracing for the current process."
def enable_tracing do
  pid_to_trace = self()
  add_traced_pid(pid_to_trace)
  # Initialize storage for call trees
  init_trace_storage()

  # BUGFIX: the watcher process must own the monitor. Previously the *caller*
  # called Process.monitor/1 (on itself), so the :DOWN message was delivered
  # to the traced process — never to the watcher — and cleanup only ever
  # happened via the safety timeout. Also dropped the [:monitor] spawn flag,
  # which additionally made the caller monitor the watcher and leaked stray
  # :DOWN messages into its mailbox.
  spawn(fn ->
    ref = Process.monitor(pid_to_trace)

    receive do
      {:DOWN, ^ref, :process, ^pid_to_trace, _reason} ->
        remove_traced_pid(pid_to_trace)
    after
      # 1 hour safety timeout
      3_600_000 ->
        remove_traced_pid(pid_to_trace)
    end
  end)

  :ok
end
|
||||||
|
|
||||||
|
@doc "Disables function call tracing for the current process."
# Only unregisters self() from the shared traced-pid set.
def disable_tracing do
  remove_traced_pid(self())
  # Note: Does not clear trace data, get_and_clear_trace_data() does that.
  :ok
end
|
||||||
|
|
||||||
|
@doc """
Runs the given 0-arity function with tracing enabled.
Returns a tuple: `{{:ok, result} | {:error, {exception, stacktrace}}, trace_data}`.
Trace data is a list of call tree root nodes.

As a side effect, a best-effort JSON dump of the trace is written to
`trace.json` in the current working directory.
"""
def run(fun) when is_function(fun, 0) do
  enable_tracing()

  outcome =
    try do
      {:ok, fun.()}
    rescue
      exception -> {:error, {exception, __STACKTRACE__}}
    end

  trace_data = get_and_clear_trace_data()
  disable_tracing()

  # BUGFIX: the dump is best-effort now. Trace nodes may contain tuples
  # (sanitized results), which the JSON encoder cannot represent — previously
  # JSON.encode!/1 raised here and destroyed the outcome of `fun`. File.write
  # errors are likewise swallowed: tracing must not break the traced code.
  try do
    trace_data
    |> JSON.encode!()
    |> then(&File.write("trace.json", &1))
  rescue
    _e -> :ok
  end

  {outcome, trace_data}
end
|
||||||
|
|
||||||
|
# --- Process Dictionary for Call Depth (used for printing indent) ---
|
||||||
|
# Current print-indent depth for this process (0 when unset).
# NOTE(review): a second `defp get_depth` with a different body (keyed on
# :debug_depth) exists near the bottom of this module; Elixir treats both as
# clauses of get_depth/0 and this guard-less clause always matches first,
# making the other one unreachable — consolidate or rename it.
defp get_depth, do: Process.get(:tdd_debug_depth, 0)
|
||||||
|
|
||||||
|
# Bumps the per-process print-indent depth and returns the new value.
def increment_depth do
  depth = get_depth() + 1
  Process.put(:tdd_debug_depth, depth)
  depth
end
|
||||||
|
|
||||||
|
# Lowers the per-process print-indent depth, clamped at zero; returns it.
def decrement_depth do
  depth = get_depth() - 1
  clamped = if depth < 0, do: 0, else: depth
  Process.put(:tdd_debug_depth, clamped)
  clamped
end
|
||||||
|
|
||||||
|
# --- Core Macro Logic ---
|
||||||
|
@doc """
Injects the tracing `def`/`defp` macros into the using module.

Hides `Kernel.def/2` and `Kernel.defp/2` so that this module's homonymous
macros take over every subsequent function definition in the caller.
"""
defmacro __using__(_opts) do
  quote do
    import Kernel, except: [def: 1, def: 2, defp: 1, defp: 2]
    # Import this module's public functions/macros
    import Tdd.Debug
    # Ensure this module is compiled for macros
    require Tdd.Debug
  end
end
|
||||||
|
|
||||||
|
@doc false
# Drop-in replacement for `Kernel.def/2`: expands to a definition whose body
# logs call/return and records a call-tree node when the current pid is traced.
defmacro def(call, clauses \\ Keyword.new()) do
  generate_traced_function(:def, call, clauses, __CALLER__)
end
|
||||||
|
|
||||||
|
@doc false
# Drop-in replacement for `Kernel.defp/2`; see `def/2` above.
defmacro defp(call, clauses \\ Keyword.new()) do
  generate_traced_function(:defp, call, clauses, __CALLER__)
end
|
||||||
|
|
||||||
|
# True when the AST node is a plain variable (e.g. `x`) — excluding the bare
# underscore `_`. Such patterns can be logged directly without introducing an
# extra capture variable. (Note: `nil` contexts pass because `is_atom(nil)`.)
defp is_simple_variable_ast?({name, _meta, context})
     when is_atom(name) and name != :_ and (is_nil(context) or is_atom(context)),
     do: true

defp is_simple_variable_ast?(_other), do: false
|
||||||
|
|
||||||
|
# Builds the AST for a traced `def`/`defp`.
#
# For each argument pattern it either reuses the pattern's own variable or
# wraps the pattern in `capture_var = pattern` so the runtime value can be
# logged; the generated body then logs CALL/ARGS, maintains a call-tree node
# stack in the process dictionary, runs the original body, and logs
# RETURN/ERROR. Untraced processes run the original body directly.
defp generate_traced_function(type, call_ast, clauses, caller_env) do
  require Macro

  {function_name_ast, meta_call, original_args_patterns_ast_nullable} = call_ast
  original_args_patterns_ast_list = original_args_patterns_ast_nullable || []

  # `clauses` may be a keyword list with :do, or a bare body; default to nil.
  original_body_ast =
    if Keyword.keyword?(clauses) do
      Keyword.get(clauses, :do, clauses)
    else
      clauses
    end || quote(do: nil)

  # --- Argument Handling for Logging ---
  # For each pattern: {pattern to use in the head, AST assigning the logged
  # value into __td_arg_N__, the __td_arg_N__ var itself}.
  mapped_and_generated_vars_tuples =
    Enum.map(Enum.with_index(original_args_patterns_ast_list), fn {original_pattern_ast, index} ->
      td_arg_var = Macro.var(String.to_atom("__td_arg_#{index}__"), nil)

      {final_pattern_for_head, rhs_for_td_arg_assignment} =
        case original_pattern_ast do
          # `_` cannot be read back; log a sentinel instead.
          {:_, _, _} = underscore_ast ->
            {underscore_ast, quote(do: :__td_ignored_argument__)}

          # `var = pattern` (or `pattern = pattern`): reuse the simple side,
          # otherwise wrap the whole assignment in a fresh capture variable.
          {:=, _meta_assign, [lhs_of_assign, _rhs_of_assign]} = assignment_pattern_ast ->
            if is_simple_variable_ast?(lhs_of_assign) do
              {assignment_pattern_ast, lhs_of_assign}
            else
              captured_val_var = Macro.unique_var(String.to_atom("tdc_assign_#{index}"), Elixir)

              new_head_pattern =
                quote do
                  unquote(captured_val_var) = unquote(assignment_pattern_ast)
                end

              {new_head_pattern, captured_val_var}
            end

          # Default arguments: keep `\\` in the head, log the bound variable.
          {:\\, _meta_default, [pattern_before_default, _default_value_ast]} =
              default_arg_pattern_ast ->
            cond do
              is_simple_variable_ast?(pattern_before_default) ->
                {default_arg_pattern_ast, pattern_before_default}

              match?({:=, _, [lhs_inner_assign, _]}, pattern_before_default) and
                  is_simple_variable_ast?(pattern_before_default |> elem(2) |> Enum.at(0)) ->
                {:=, _, [lhs_inner_assign, _]} = pattern_before_default
                {default_arg_pattern_ast, lhs_inner_assign}

              true ->
                captured_val_var = Macro.unique_var(String.to_atom("tdc_def_#{index}"), Elixir)

                new_head_pattern =
                  quote do
                    unquote(captured_val_var) = unquote(default_arg_pattern_ast)
                  end

                {new_head_pattern, captured_val_var}
            end

          # Anything else: plain variable passes through; destructuring
          # patterns get wrapped in a fresh capture variable.
          ast_node ->
            if is_simple_variable_ast?(ast_node) do
              {ast_node, ast_node}
            else
              captured_val_var = Macro.unique_var(String.to_atom("tdc_pat_#{index}"), Elixir)

              new_head_pattern =
                quote do
                  unquote(captured_val_var) = unquote(ast_node)
                end

              {new_head_pattern, captured_val_var}
            end
        end

      assignment_ast =
        quote do
          unquote(td_arg_var) = unquote(rhs_for_td_arg_assignment)
        end

      {final_pattern_for_head, assignment_ast, td_arg_var}
    end)

  {new_args_patterns_for_head_list, assignments_for_logging_vars_ast_list,
   generated_vars_to_log_asts} =
    if mapped_and_generated_vars_tuples == [] do
      {[], [], []}
    else
      # Transpose list of 3-element tuples into 3 lists
      list_of_lists = Enum.map(mapped_and_generated_vars_tuples, &Tuple.to_list(&1))

      [
        Enum.map(list_of_lists, &List.first(&1)),
        Enum.map(list_of_lists, &Enum.at(&1, 1)),
        Enum.map(list_of_lists, &Enum.at(&1, 2))
      ]
      |> then(fn [a, b, c] -> {a, b, c} end)
    end

  new_call_ast = {function_name_ast, meta_call, new_args_patterns_for_head_list}

  # --- Variables for Runtime Info ---
  # Hygienic vars holding string module/function name and arity at runtime.
  module_name_runtime_var = Macro.var(:__td_module_name__, __MODULE__)
  printable_fn_name_runtime_var = Macro.var(:__td_printable_fn_name__, __MODULE__)
  arity_runtime_var = Macro.var(:__td_arity__, __MODULE__)

  # Arity calculation at macro expansion time
  arity_value = length(original_args_patterns_ast_list)

  traced_body_inner_ast =
    quote do
      # --- Resolve Module/Function/Arity Info at Runtime ---
      unquote(module_name_runtime_var) = Atom.to_string(unquote(caller_env.module))
      unquote(arity_runtime_var) = unquote(arity_value)

      # Resolve var if func name is from a var
      runtime_resolved_fn_name_val = unquote(function_name_ast)

      unquote(printable_fn_name_runtime_var) =
        if is_atom(runtime_resolved_fn_name_val) do
          Atom.to_string(runtime_resolved_fn_name_val)
        else
          # For unquote(var) or operators
          Macro.to_string(runtime_resolved_fn_name_val)
        end

      # --- Main Tracing Logic ---
      if Tdd.Debug.is_pid_traced?(self()) do
        # --- On Entry: Prepare Data & Log ---
        # Assign __td_arg_N__ vars
        unquote_splicing(assignments_for_logging_vars_ast_list)
        runtime_arg_values_for_node_and_log = [unquote_splicing(generated_vars_to_log_asts)]

        # sanitized_args_for_node = Tdd.Debug.TracerData.sanitize_values(runtime_arg_values_for_node_and_log)
        sanitized_args_for_node = inspect(runtime_arg_values_for_node_and_log)
        # For print indent & node depth
        current_call_print_depth = Tdd.Debug.increment_depth()

        # Create placeholder node, push to call_stack
        new_node_details = %{
          id: System.unique_integer([:positive, :monotonic]),
          function:
            "#{unquote(module_name_runtime_var)}.#{unquote(printable_fn_name_runtime_var)}",
          args: sanitized_args_for_node,
          depth: current_call_print_depth,
          # Will be populated in reverse chronological order
          children: [],
          # Placeholder
          result: :__td_not_returned_yet__
        }

        Process.put(:tdd_debug_call_stack, [
          new_node_details | Process.get(:tdd_debug_call_stack, [])
        ])

        # Logging Call
        indent = String.duplicate(" ", current_call_print_depth - 1)

        IO.puts(
          "#{indent}CALL: #{unquote(module_name_runtime_var)}.#{unquote(printable_fn_name_runtime_var)}"
        )

        IO.puts("#{indent} ARGS: #{inspect(runtime_arg_values_for_node_and_log)}")

        try do
          # --- Execute Original Body ---
          result_val = unquote(original_body_ast)

          # --- On Normal Exit: Finalize Node & Log ---
          [completed_node_placeholder | parent_stack_nodes] = Process.get(:tdd_debug_call_stack)
          # BUGFIX: underscore-prefixed — value is only used by the
          # commented-out duration computation below (silences a warning).
          _ts_exit_monotonic = System.monotonic_time()

          # duration_ms = System.convert_time_unit(_ts_exit_monotonic - completed_node_placeholder.timestamp_enter_monotonic, :native, :millisecond)
          sanitized_result_val = Tdd.Debug.TracerData.sanitize_value(result_val)

          finalized_node =
            completed_node_placeholder
            |> Map.put(:result, %{type: :ok, value: sanitized_result_val})

          # For print indent
          _ = Tdd.Debug.decrement_depth()

          # Update call stack: Add finalized_node to parent's children or session_roots
          new_call_stack_after_success =
            case parent_stack_nodes do
              [parent_on_stack | grand_parent_stack_nodes] ->
                updated_parent = %{
                  parent_on_stack
                  | children: [finalized_node | parent_on_stack.children]
                }

                [updated_parent | grand_parent_stack_nodes]

              # This was a root call
              [] ->
                Process.put(:tdd_debug_session_roots, [
                  finalized_node | Process.get(:tdd_debug_session_roots, [])
                ])

                # New call_stack is empty for this branch
                []
            end

          Process.put(:tdd_debug_call_stack, new_call_stack_after_success)

          # Logging Return
          IO.puts(
            "#{indent}RETURN from #{unquote(module_name_runtime_var)}.#{unquote(printable_fn_name_runtime_var)}: #{inspect(result_val)}"
          )

          # Return actual result
          result_val
        rescue
          exception_class_err ->
            # --- On Error Exit: Finalize Node & Log ---
            error_instance_err = exception_class_err
            stacktrace_err = __STACKTRACE__

            [erroring_node_placeholder | parent_stack_nodes_err] =
              Process.get(:tdd_debug_call_stack)

            ts_exit_monotonic_err = System.monotonic_time()

            # BUGFIX: :timestamp_enter_monotonic is no longer stored on the
            # entry node (see new_node_details above), so the previous dot
            # access raised a KeyError on every traced error, masking the
            # real exception. Fall back to nil when the key is absent.
            duration_ms_err =
              case Map.fetch(erroring_node_placeholder, :timestamp_enter_monotonic) do
                {:ok, ts_enter} ->
                  System.convert_time_unit(
                    ts_exit_monotonic_err - ts_enter,
                    :native,
                    :millisecond
                  )

                :error ->
                  nil
              end

            sanitized_error_info =
              Tdd.Debug.TracerData.sanitize_error(error_instance_err, stacktrace_err)

            finalized_error_node =
              erroring_node_placeholder
              |> Map.put(:timestamp_exit_monotonic, ts_exit_monotonic_err)
              |> Map.put(:duration_ms, duration_ms_err)
              |> Map.put(:result, sanitized_error_info)

            # For print indent
            _ = Tdd.Debug.decrement_depth()

            # Update call stack: Add finalized_error_node to parent's children or session_roots
            new_call_stack_after_error =
              case parent_stack_nodes_err do
                [parent_on_stack_err | grand_parent_stack_nodes_err] ->
                  updated_parent_err = %{
                    parent_on_stack_err
                    | children: [finalized_error_node | parent_on_stack_err.children]
                  }

                  [updated_parent_err | grand_parent_stack_nodes_err]

                # This was a root call that errored
                [] ->
                  Process.put(:tdd_debug_session_roots, [
                    finalized_error_node | Process.get(:tdd_debug_session_roots, [])
                  ])

                  # New call_stack is empty for this branch
                  []
              end

            Process.put(:tdd_debug_call_stack, new_call_stack_after_error)

            # Logging Error
            IO.puts(
              "#{indent}ERROR in #{unquote(module_name_runtime_var)}.#{unquote(printable_fn_name_runtime_var)}: #{inspect(error_instance_err)}"
            )

            # Reraise the original error
            reraise error_instance_err, stacktrace_err
        end
      else
        # --- Not Traced: Execute Original Body Directly ---
        unquote(original_body_ast)
      end
    end

  # --- Final Definition ---
  final_definition_ast =
    quote location: :keep do
      Kernel.unquote(type)(
        unquote(new_call_ast),
        do: unquote(traced_body_inner_ast)
      )
    end

  final_definition_ast
end
|
||||||
|
|
||||||
|
# --- TDD Graph Printing (Kept as it was, assuming it's for a different purpose) ---
|
||||||
|
@doc "Prints a formatted representation of a TDD graph structure."
# Entry point: prints a header/footer around a depth-first dump of the graph
# starting at `id`. `store_module` must export get_node/1 (defaults to
# Tdd.Store — NOTE(review): that module's contract is not visible here).
def print_tdd_graph(id, store_module \\ Tdd.Store) do
  IO.puts("--- TDD Graph (ID: #{id}) ---")
  # Fresh visited-set for cycle detection, starting at indent level 0.
  do_print_tdd_node(id, 0, MapSet.new(), store_module)
  IO.puts("------------------------")
end
|
||||||
|
|
||||||
|
# Depth-first walk of the TDD graph, printing one line per node.
# `visited` guards against cycles; `store_module.get_node/1` supplies nodes,
# which are either terminals or {var, yes_id, no_id, dc_id} branches.
defp do_print_tdd_node(id, indent_level, visited, store_module) do
  pad = String.duplicate(" ", indent_level)

  if MapSet.member?(visited, id) do
    IO.puts("#{pad}ID #{id} -> [Seen, recursive link]")
    :ok
  else
    seen = MapSet.put(visited, id)

    # Assumes store_module.get_node/1 exists
    case store_module.get_node(id) do
      {:ok, :true_terminal} ->
        IO.puts("#{pad}ID #{id} -> TRUE")

      {:ok, :false_terminal} ->
        IO.puts("#{pad}ID #{id} -> FALSE")

      {:ok, {var, yes_id, no_id, dc_id}} ->
        IO.puts("#{pad}ID #{id}: IF #{inspect(var)}")
        IO.puts("#{pad} ├─ Yes (to ID #{yes_id}):")
        do_print_tdd_node(yes_id, indent_level + 2, seen, store_module)
        IO.puts("#{pad} ├─ No (to ID #{no_id}):")
        do_print_tdd_node(no_id, indent_level + 2, seen, store_module)
        IO.puts("#{pad} └─ DC (to ID #{dc_id}):")
        do_print_tdd_node(dc_id, indent_level + 2, seen, store_module)

      {:error, reason} ->
        IO.puts("#{pad}ID #{id}: ERROR - #{reason}")
    end
  end
end
|
||||||
|
|
||||||
|
@doc "Logs a value with a label, using IO.inspect."
def log(value, label, opts \\ []) do
  # You can add a flag here to disable all logging globally
  if true do
    # Caller-supplied opts win over these readability defaults.
    inspect_opts = Keyword.merge([width: 120, pretty: true], opts)
    IO.inspect(value, [{:label, "[DEBUG] #{label}"} | inspect_opts])
  end

  # Return the original value to allow piping
  value
end
|
||||||
|
|
||||||
|
@doc "Gets and increments a depth counter for tracing recursion."
# NOTE(review): dead code — get_depth/0 is already defined earlier in this
# module (reading :tdd_debug_depth); Elixir merges same-name/arity clauses,
# and that guard-less clause always matches first, so this body never runs.
# Consequently the :debug_depth counter used by dec_depth/0 and log_exit/2
# never increments. Also, @doc on a defp only yields a compiler warning.
# Intended behavior (if reachable): return the current indent string and
# bump the :debug_depth counter.
defp get_depth() do
  depth = Process.get(:debug_depth, 0)
  Process.put(:debug_depth, depth + 1)
  String.duplicate(" ", depth)
end
|
||||||
|
|
||||||
|
@doc "Decrements the depth counter."
defp dec_depth() do
  # Clamp at zero so unbalanced exit logging cannot drive the counter negative.
  current = Process.get(:debug_depth, 1)
  Process.put(:debug_depth, max(current - 1, 0))
end
|
||||||
|
|
||||||
|
@doc "Logs a message with indentation for tracing recursion."
def log_entry(label) do
  prefix = get_depth()
  # BUGFIX: removed a leftover `IO.inspect(prefix, label: "PREFIX")` debug
  # statement that polluted every log line with a stray "PREFIX: ..." dump.
  # NOTE(review): due to the duplicate get_depth/0 clause at the top of this
  # module, `prefix` is currently an integer (the :tdd_debug_depth counter),
  # not an indent string — confirm which get_depth was intended here.
  IO.puts("#{prefix}[DEBUG] >> #{label}")
end
|
||||||
|
|
||||||
|
@doc "Logs a return value with indentation."
def log_exit(value, label) do
  dec_depth()
  indent = String.duplicate(" ", Process.get(:debug_depth, 0))
  IO.inspect(value, label: indent <> "[DEBUG] << #{label}")
  # Return the value so this can sit at the tail of a pipeline.
  value
end
|
||||||
|
end
|
||||||
2092
lib/til.ex
2092
lib/til.ex
File diff suppressed because it is too large
Load Diff
@ -193,6 +193,7 @@ defmodule Til.Parser do
|
|||||||
case parse_atom_datum(source, state, parent_id) do
|
case parse_atom_datum(source, state, parent_id) do
|
||||||
{:ok, node_id, rest, new_state} ->
|
{:ok, node_id, rest, new_state} ->
|
||||||
{:ok, node_id, rest, new_state}
|
{:ok, node_id, rest, new_state}
|
||||||
|
|
||||||
{:error, :not_atom} ->
|
{:error, :not_atom} ->
|
||||||
# Failed to parse as a specific atom (e.g. ":foo").
|
# Failed to parse as a specific atom (e.g. ":foo").
|
||||||
# It could be a symbol that starts with ':' (e.g. if we allow ":" as a symbol).
|
# It could be a symbol that starts with ':' (e.g. if we allow ":" as a symbol).
|
||||||
@ -200,9 +201,16 @@ defmodule Til.Parser do
|
|||||||
case parse_symbol_datum(source, state, parent_id) do
|
case parse_symbol_datum(source, state, parent_id) do
|
||||||
{:ok, node_id, rest, new_state} ->
|
{:ok, node_id, rest, new_state} ->
|
||||||
{:ok, node_id, rest, new_state}
|
{:ok, node_id, rest, new_state}
|
||||||
|
|
||||||
{:error, :not_symbol} ->
|
{:error, :not_symbol} ->
|
||||||
# If it started with ':' but wasn't a valid atom and also not a valid symbol
|
# If it started with ':' but wasn't a valid atom and also not a valid symbol
|
||||||
create_error_node_and_advance(source, state, parent_id, 1, "Unknown token starting with ':'")
|
create_error_node_and_advance(
|
||||||
|
source,
|
||||||
|
state,
|
||||||
|
parent_id,
|
||||||
|
1,
|
||||||
|
"Unknown token starting with ':'"
|
||||||
|
)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -212,18 +220,24 @@ defmodule Til.Parser do
|
|||||||
case parse_integer_datum(source, state, parent_id) do
|
case parse_integer_datum(source, state, parent_id) do
|
||||||
{:ok, node_id, rest, new_state} ->
|
{:ok, node_id, rest, new_state} ->
|
||||||
{:ok, node_id, rest, new_state}
|
{:ok, node_id, rest, new_state}
|
||||||
|
|
||||||
{:error, :not_integer} ->
|
{:error, :not_integer} ->
|
||||||
# Not an integer, try parsing as a symbol
|
# Not an integer, try parsing as a symbol
|
||||||
case parse_symbol_datum(source, state, parent_id) do
|
case parse_symbol_datum(source, state, parent_id) do
|
||||||
{:ok, node_id, rest, new_state} ->
|
{:ok, node_id, rest, new_state} ->
|
||||||
{:ok, node_id, rest, new_state}
|
{:ok, node_id, rest, new_state}
|
||||||
|
|
||||||
{:error, :not_symbol} ->
|
{:error, :not_symbol} ->
|
||||||
# Not a symbol either. Consume 1 char for the unknown token.
|
# Not a symbol either. Consume 1 char for the unknown token.
|
||||||
create_error_node_and_advance(source, state, parent_id, 1, "Unknown token")
|
create_error_node_and_advance(source, state, parent_id, 1, "Unknown token")
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end # end inner cond
|
end
|
||||||
end # end outer cond
|
|
||||||
|
# end inner cond
|
||||||
|
end
|
||||||
|
|
||||||
|
# end outer cond
|
||||||
end
|
end
|
||||||
|
|
||||||
# --- Datum Parsing Helpers --- (parse_string_datum, process_string_content)
|
# --- Datum Parsing Helpers --- (parse_string_datum, process_string_content)
|
||||||
@ -283,7 +297,7 @@ defmodule Til.Parser do
|
|||||||
raw_token = opening_tick <> content_segment <> closing_tick
|
raw_token = opening_tick <> content_segment <> closing_tick
|
||||||
|
|
||||||
rest_of_source =
|
rest_of_source =
|
||||||
String.slice(source_after_opening_tick, (idx_closing_tick_in_segment + 1)..-1)
|
String.slice(source_after_opening_tick, (idx_closing_tick_in_segment + 1)..-1//1)
|
||||||
|
|
||||||
state_at_node_end = advance_pos(initial_state_for_token, raw_token)
|
state_at_node_end = advance_pos(initial_state_for_token, raw_token)
|
||||||
|
|
||||||
@ -339,7 +353,7 @@ defmodule Til.Parser do
|
|||||||
|> String.length()
|
|> String.length()
|
||||||
|
|
||||||
spaces_to_remove = min(current_leading_spaces_count, strip_indent)
|
spaces_to_remove = min(current_leading_spaces_count, strip_indent)
|
||||||
String.slice(line, spaces_to_remove..-1)
|
String.slice(line, spaces_to_remove..-1//1)
|
||||||
end)
|
end)
|
||||||
|
|
||||||
all_processed_lines = [first_line | processed_rest_lines]
|
all_processed_lines = [first_line | processed_rest_lines]
|
||||||
@ -356,9 +370,10 @@ defmodule Til.Parser do
|
|||||||
# The colon itself is part of the atom's raw string.
|
# The colon itself is part of the atom's raw string.
|
||||||
# The `atom_name_part` is what comes after the colon.
|
# The `atom_name_part` is what comes after the colon.
|
||||||
case Regex.run(~r/^:([^\s\(\)\[\]\{\}]+)/, source) do
|
case Regex.run(~r/^:([^\s\(\)\[\]\{\}]+)/, source) do
|
||||||
[raw_atom_str, atom_name_part] -> # raw_atom_str is like ":foo", atom_name_part is "foo"
|
# raw_atom_str is like ":foo", atom_name_part is "foo"
|
||||||
|
[raw_atom_str, atom_name_part] ->
|
||||||
# The regex [^...]+ ensures atom_name_part is not empty.
|
# The regex [^...]+ ensures atom_name_part is not empty.
|
||||||
rest_after_atom = String.slice(source, String.length(raw_atom_str)..-1)
|
rest_after_atom = String.slice(source, String.length(raw_atom_str)..-1//1)
|
||||||
start_offset = state.offset
|
start_offset = state.offset
|
||||||
start_line = state.line
|
start_line = state.line
|
||||||
start_col = state.col
|
start_col = state.col
|
||||||
@ -387,9 +402,11 @@ defmodule Til.Parser do
|
|||||||
line: end_line,
|
line: end_line,
|
||||||
col: end_col
|
col: end_col
|
||||||
}
|
}
|
||||||
|
|
||||||
{:ok, new_node_id, rest_after_atom, final_state}
|
{:ok, new_node_id, rest_after_atom, final_state}
|
||||||
|
|
||||||
_ -> # No match (nil or list that doesn't conform, e.g., just ":" or ": followed by space/delimiter")
|
# No match (nil or list that doesn't conform, e.g., just ":" or ": followed by space/delimiter")
|
||||||
|
_ ->
|
||||||
{:error, :not_atom}
|
{:error, :not_atom}
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
@ -426,7 +443,7 @@ defmodule Til.Parser do
|
|||||||
# Regex excludes common delimiters. `m{` is handled before symbol parsing.
|
# Regex excludes common delimiters. `m{` is handled before symbol parsing.
|
||||||
case Regex.run(~r/^([^\s\(\)\[\]\{\}]+)/, source) do
|
case Regex.run(~r/^([^\s\(\)\[\]\{\}]+)/, source) do
|
||||||
[raw_symbol | _] ->
|
[raw_symbol | _] ->
|
||||||
rest_after_symbol = String.slice(source, String.length(raw_symbol)..-1)
|
rest_after_symbol = String.slice(source, String.length(raw_symbol)..-1//1)
|
||||||
start_offset = state.offset
|
start_offset = state.offset
|
||||||
start_line = state.line
|
start_line = state.line
|
||||||
start_col = state.col
|
start_col = state.col
|
||||||
@ -492,7 +509,8 @@ defmodule Til.Parser do
|
|||||||
|
|
||||||
defp parse_s_expression(original_source_string, source, state, parent_id) do
|
defp parse_s_expression(original_source_string, source, state, parent_id) do
|
||||||
# Standard S-expression parsing via parse_collection
|
# Standard S-expression parsing via parse_collection
|
||||||
result = parse_collection(
|
result =
|
||||||
|
parse_collection(
|
||||||
original_source_string,
|
original_source_string,
|
||||||
source,
|
source,
|
||||||
state,
|
state,
|
||||||
@ -515,8 +533,7 @@ defmodule Til.Parser do
|
|||||||
|
|
||||||
final_state = %{
|
final_state = %{
|
||||||
state_after_collection
|
state_after_collection
|
||||||
| nodes:
|
| nodes: Map.put(state_after_collection.nodes, transformed_node.id, transformed_node)
|
||||||
Map.put(state_after_collection.nodes, transformed_node.id, transformed_node)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
{:ok, transformed_node.id, rest_after_collection, final_state}
|
{:ok, transformed_node.id, rest_after_collection, final_state}
|
||||||
@ -548,7 +565,8 @@ defmodule Til.Parser do
|
|||||||
# into a :lambda_expression node.
|
# into a :lambda_expression node.
|
||||||
defp transform_to_lambda_expression(s_expr_node, nodes_map) do
|
defp transform_to_lambda_expression(s_expr_node, nodes_map) do
|
||||||
# s_expr_node.children = [fn_symbol_id, params_s_expr_id, body_form1_id, ...]
|
# s_expr_node.children = [fn_symbol_id, params_s_expr_id, body_form1_id, ...]
|
||||||
_fn_symbol_id = Enum.at(s_expr_node.children, 0) # Already checked
|
# Already checked
|
||||||
|
_fn_symbol_id = Enum.at(s_expr_node.children, 0)
|
||||||
|
|
||||||
if length(s_expr_node.children) < 2 do
|
if length(s_expr_node.children) < 2 do
|
||||||
%{s_expr_node | parsing_error: "Malformed 'fn' expression: missing parameters list."}
|
%{s_expr_node | parsing_error: "Malformed 'fn' expression: missing parameters list."}
|
||||||
@ -557,7 +575,11 @@ defmodule Til.Parser do
|
|||||||
params_s_expr_node = Map.get(nodes_map, params_s_expr_id)
|
params_s_expr_node = Map.get(nodes_map, params_s_expr_id)
|
||||||
|
|
||||||
if !(params_s_expr_node && params_s_expr_node.ast_node_type == :s_expression) do
|
if !(params_s_expr_node && params_s_expr_node.ast_node_type == :s_expression) do
|
||||||
Map.put(s_expr_node, :parsing_error, "Malformed 'fn' expression: parameters list is not an S-expression.")
|
Map.put(
|
||||||
|
s_expr_node,
|
||||||
|
:parsing_error,
|
||||||
|
"Malformed 'fn' expression: parameters list is not an S-expression."
|
||||||
|
)
|
||||||
else
|
else
|
||||||
# Children of the parameters S-expression, e.g. for (fn ((a integer) (b atom) atom) ...),
|
# Children of the parameters S-expression, e.g. for (fn ((a integer) (b atom) atom) ...),
|
||||||
# param_s_expr_children_ids would be IDs of [(a integer), (b atom), atom]
|
# param_s_expr_children_ids would be IDs of [(a integer), (b atom), atom]
|
||||||
@ -579,33 +601,50 @@ defmodule Til.Parser do
|
|||||||
all_arg_specs_valid =
|
all_arg_specs_valid =
|
||||||
Enum.all?(arg_spec_node_ids, fn arg_id ->
|
Enum.all?(arg_spec_node_ids, fn arg_id ->
|
||||||
arg_node = Map.get(nodes_map, arg_id)
|
arg_node = Map.get(nodes_map, arg_id)
|
||||||
|
|
||||||
case arg_node do
|
case arg_node do
|
||||||
%{ast_node_type: :symbol} -> true # e.g. x
|
# e.g. x
|
||||||
%{ast_node_type: :s_expression, children: s_children} -> # e.g. (x integer)
|
%{ast_node_type: :symbol} ->
|
||||||
|
true
|
||||||
|
|
||||||
|
# e.g. (x integer)
|
||||||
|
%{ast_node_type: :s_expression, children: s_children} ->
|
||||||
if length(s_children) == 2 do
|
if length(s_children) == 2 do
|
||||||
param_sym_node = Map.get(nodes_map, hd(s_children))
|
param_sym_node = Map.get(nodes_map, hd(s_children))
|
||||||
type_spec_node = Map.get(nodes_map, hd(tl(s_children)))
|
type_spec_node = Map.get(nodes_map, hd(tl(s_children)))
|
||||||
|
|
||||||
param_sym_node && param_sym_node.ast_node_type == :symbol &&
|
param_sym_node && param_sym_node.ast_node_type == :symbol &&
|
||||||
type_spec_node && (type_spec_node.ast_node_type == :symbol || type_spec_node.ast_node_type == :s_expression)
|
type_spec_node &&
|
||||||
|
(type_spec_node.ast_node_type == :symbol ||
|
||||||
|
type_spec_node.ast_node_type == :s_expression)
|
||||||
else
|
else
|
||||||
false # Not a valid (param_symbol type_spec) structure
|
# Not a valid (param_symbol type_spec) structure
|
||||||
|
false
|
||||||
end
|
end
|
||||||
_ -> false # Not a symbol or valid S-expression for arg spec
|
|
||||||
|
# Not a symbol or valid S-expression for arg spec
|
||||||
|
_ ->
|
||||||
|
false
|
||||||
end
|
end
|
||||||
end)
|
end)
|
||||||
|
|
||||||
# Validate return_type_spec_node_id: must be nil or a valid type specifier node
|
# Validate return_type_spec_node_id: must be nil or a valid type specifier node
|
||||||
return_type_spec_valid =
|
return_type_spec_valid =
|
||||||
if is_nil(return_type_spec_node_id) do
|
if is_nil(return_type_spec_node_id) do
|
||||||
true # Inferred return type is valid
|
# Inferred return type is valid
|
||||||
|
true
|
||||||
else
|
else
|
||||||
ret_type_node = Map.get(nodes_map, return_type_spec_node_id)
|
ret_type_node = Map.get(nodes_map, return_type_spec_node_id)
|
||||||
ret_type_node && (ret_type_node.ast_node_type == :symbol || ret_type_node.ast_node_type == :s_expression)
|
|
||||||
|
ret_type_node &&
|
||||||
|
(ret_type_node.ast_node_type == :symbol ||
|
||||||
|
ret_type_node.ast_node_type == :s_expression)
|
||||||
end
|
end
|
||||||
|
|
||||||
if all_arg_specs_valid && return_type_spec_valid do
|
if all_arg_specs_valid && return_type_spec_valid do
|
||||||
body_node_ids = Enum.drop(s_expr_node.children, 2) # Body starts after 'fn' and params_s_expr
|
# Body starts after 'fn' and params_s_expr
|
||||||
|
body_node_ids = Enum.drop(s_expr_node.children, 2)
|
||||||
|
|
||||||
Map.merge(s_expr_node, %{
|
Map.merge(s_expr_node, %{
|
||||||
:ast_node_type => :lambda_expression,
|
:ast_node_type => :lambda_expression,
|
||||||
:params_s_expr_id => params_s_expr_id,
|
:params_s_expr_id => params_s_expr_id,
|
||||||
@ -617,10 +656,17 @@ defmodule Til.Parser do
|
|||||||
# Determine more specific error message
|
# Determine more specific error message
|
||||||
error_message =
|
error_message =
|
||||||
cond do
|
cond do
|
||||||
!all_arg_specs_valid -> "Malformed 'fn' expression: invalid argument specification(s)."
|
!all_arg_specs_valid ->
|
||||||
!return_type_spec_valid -> "Malformed 'fn' expression: invalid return type specification."
|
"Malformed 'fn' expression: invalid argument specification(s)."
|
||||||
true -> "Malformed 'fn' expression." # Generic fallback
|
|
||||||
|
!return_type_spec_valid ->
|
||||||
|
"Malformed 'fn' expression: invalid return type specification."
|
||||||
|
|
||||||
|
# Generic fallback
|
||||||
|
true ->
|
||||||
|
"Malformed 'fn' expression."
|
||||||
end
|
end
|
||||||
|
|
||||||
Map.put(s_expr_node, :parsing_error, error_message)
|
Map.put(s_expr_node, :parsing_error, error_message)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
@ -931,7 +977,7 @@ defmodule Til.Parser do
|
|||||||
[ws | _] = whitespace_match
|
[ws | _] = whitespace_match
|
||||||
new_offset = o + String.length(ws)
|
new_offset = o + String.length(ws)
|
||||||
{new_line, new_col} = calculate_new_line_col(ws, l, c)
|
{new_line, new_col} = calculate_new_line_col(ws, l, c)
|
||||||
remaining_source = String.slice(source, String.length(ws)..-1)
|
remaining_source = String.slice(source, String.length(ws)..-1//1)
|
||||||
{:ok, remaining_source, %{state | offset: new_offset, line: new_line, col: new_col}}
|
{:ok, remaining_source, %{state | offset: new_offset, line: new_line, col: new_col}}
|
||||||
else
|
else
|
||||||
if String.length(source) == 0 do
|
if String.length(source) == 0 do
|
||||||
|
|||||||
1634
lib/til/type.ex
1634
lib/til/type.ex
File diff suppressed because it is too large
Load Diff
146
lib/tilly/bdd.ex
146
lib/tilly/bdd.ex
@ -1,146 +0,0 @@
|
|||||||
defmodule Tilly.BDD do
|
|
||||||
@moduledoc """
|
|
||||||
Manages the BDD store, including hash-consing of BDD nodes.
|
|
||||||
The BDD store is expected to be part of a `typing_ctx` map under the key `:bdd_store`.
|
|
||||||
"""
|
|
||||||
|
|
||||||
alias Tilly.BDD.Node
|
|
||||||
|
|
||||||
@false_node_id 0
|
|
||||||
@true_node_id 1
|
|
||||||
@initial_next_node_id 2
|
|
||||||
@universal_ops_module :universal_ops
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Initializes the BDD store within the typing context.
|
|
||||||
Pre-interns canonical `false` and `true` BDD nodes.
|
|
||||||
"""
|
|
||||||
def init_bdd_store(typing_ctx) when is_map(typing_ctx) do
|
|
||||||
false_structure = Node.mk_false()
|
|
||||||
true_structure = Node.mk_true()
|
|
||||||
|
|
||||||
bdd_store = %{
|
|
||||||
nodes_by_structure: %{
|
|
||||||
{false_structure, @universal_ops_module} => @false_node_id,
|
|
||||||
{true_structure, @universal_ops_module} => @true_node_id
|
|
||||||
},
|
|
||||||
structures_by_id: %{
|
|
||||||
@false_node_id => %{structure: false_structure, ops_module: @universal_ops_module},
|
|
||||||
@true_node_id => %{structure: true_structure, ops_module: @universal_ops_module}
|
|
||||||
},
|
|
||||||
next_node_id: @initial_next_node_id,
|
|
||||||
ops_cache: %{} # Cache for BDD operations {op_key, id1, id2} -> result_id
|
|
||||||
}
|
|
||||||
|
|
||||||
Map.put(typing_ctx, :bdd_store, bdd_store)
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Gets an existing BDD node ID or interns a new one if it's not already in the store.
|
|
||||||
|
|
||||||
Returns a tuple `{new_typing_ctx, node_id}`.
|
|
||||||
The `typing_ctx` is updated if a new node is interned.
|
|
||||||
"""
|
|
||||||
def get_or_intern_node(typing_ctx, logical_structure, ops_module_atom) do
|
|
||||||
bdd_store = Map.get(typing_ctx, :bdd_store)
|
|
||||||
|
|
||||||
unless bdd_store do
|
|
||||||
raise ArgumentError, "BDD store not initialized in typing_ctx. Call init_bdd_store first."
|
|
||||||
end
|
|
||||||
|
|
||||||
key = {logical_structure, ops_module_atom}
|
|
||||||
|
|
||||||
case Map.get(bdd_store.nodes_by_structure, key) do
|
|
||||||
nil ->
|
|
||||||
# Node not found, intern it
|
|
||||||
node_id = bdd_store.next_node_id
|
|
||||||
|
|
||||||
new_nodes_by_structure = Map.put(bdd_store.nodes_by_structure, key, node_id)
|
|
||||||
|
|
||||||
node_data = %{structure: logical_structure, ops_module: ops_module_atom}
|
|
||||||
new_structures_by_id = Map.put(bdd_store.structures_by_id, node_id, node_data)
|
|
||||||
|
|
||||||
new_next_node_id = node_id + 1
|
|
||||||
|
|
||||||
new_bdd_store =
|
|
||||||
%{
|
|
||||||
bdd_store
|
|
||||||
| nodes_by_structure: new_nodes_by_structure,
|
|
||||||
structures_by_id: new_structures_by_id,
|
|
||||||
next_node_id: new_next_node_id
|
|
||||||
}
|
|
||||||
|
|
||||||
new_typing_ctx = Map.put(typing_ctx, :bdd_store, new_bdd_store)
|
|
||||||
{new_typing_ctx, node_id}
|
|
||||||
|
|
||||||
existing_node_id ->
|
|
||||||
# Node found
|
|
||||||
{typing_ctx, existing_node_id}
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Retrieves the node's structure and ops_module from the BDD store.
|
|
||||||
Returns `%{structure: logical_structure_tuple, ops_module: ops_module_atom}` or `nil` if not found.
|
|
||||||
"""
|
|
||||||
def get_node_data(typing_ctx, node_id) do
|
|
||||||
with %{bdd_store: %{structures_by_id: structures_by_id}} <- typing_ctx,
|
|
||||||
data when not is_nil(data) <- Map.get(structures_by_id, node_id) do
|
|
||||||
data
|
|
||||||
else
|
|
||||||
_ -> nil
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Checks if the given node ID corresponds to the canonical `false` BDD node.
|
|
||||||
"""
|
|
||||||
def is_false_node?(typing_ctx, node_id) do
|
|
||||||
# Optimized check for the predefined ID
|
|
||||||
if node_id == @false_node_id do
|
|
||||||
true
|
|
||||||
else
|
|
||||||
# Fallback for cases where a node might be structurally false but not have the canonical ID.
|
|
||||||
# This should ideally not happen with proper interning of Node.mk_false() via get_or_intern_node.
|
|
||||||
case get_node_data(typing_ctx, node_id) do
|
|
||||||
%{structure: structure, ops_module: @universal_ops_module} ->
|
|
||||||
structure == Node.mk_false()
|
|
||||||
_ ->
|
|
||||||
false
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Checks if the given node ID corresponds to the canonical `true` BDD node.
|
|
||||||
"""
|
|
||||||
def is_true_node?(typing_ctx, node_id) do
|
|
||||||
# Optimized check for the predefined ID
|
|
||||||
if node_id == @true_node_id do
|
|
||||||
true
|
|
||||||
else
|
|
||||||
# Fallback for cases where a node might be structurally true but not have the canonical ID.
|
|
||||||
case get_node_data(typing_ctx, node_id) do
|
|
||||||
%{structure: structure, ops_module: @universal_ops_module} ->
|
|
||||||
structure == Node.mk_true()
|
|
||||||
_ ->
|
|
||||||
false
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Returns the canonical ID for the `false` BDD node.
|
|
||||||
"""
|
|
||||||
def false_node_id(), do: @false_node_id
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Returns the canonical ID for the `true` BDD node.
|
|
||||||
"""
|
|
||||||
def true_node_id(), do: @true_node_id
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Returns the atom used as the `ops_module` for universal nodes like `true` and `false`.
|
|
||||||
"""
|
|
||||||
def universal_ops_module(), do: @universal_ops_module
|
|
||||||
end
|
|
||||||
@ -1,89 +0,0 @@
|
|||||||
defmodule Tilly.BDD.AtomBoolOps do
|
|
||||||
@moduledoc """
|
|
||||||
BDD operations module for sets of atoms.
|
|
||||||
Elements are atoms, and leaf values are booleans.
|
|
||||||
"""
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Compares two atoms.
|
|
||||||
Returns `:lt`, `:eq`, or `:gt`.
|
|
||||||
"""
|
|
||||||
def compare_elements(elem1, elem2) when is_atom(elem1) and is_atom(elem2) do
|
|
||||||
cond do
|
|
||||||
elem1 < elem2 -> :lt
|
|
||||||
elem1 > elem2 -> :gt
|
|
||||||
true -> :eq
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Checks if two atoms are equal.
|
|
||||||
"""
|
|
||||||
def equal_element?(elem1, elem2) when is_atom(elem1) and is_atom(elem2) do
|
|
||||||
elem1 == elem2
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Hashes an atom.
|
|
||||||
"""
|
|
||||||
def hash_element(elem) when is_atom(elem) do
|
|
||||||
# erlang.phash2 is suitable for term hashing
|
|
||||||
:erlang.phash2(elem)
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
The leaf value representing an empty set of atoms (false).
|
|
||||||
"""
|
|
||||||
def empty_leaf(), do: false
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
The leaf value representing the universal set of atoms (true).
|
|
||||||
This is used if a BDD simplifies to a state where all atoms of this kind are included.
|
|
||||||
"""
|
|
||||||
def any_leaf(), do: true
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Checks if a leaf value represents an empty set.
|
|
||||||
"""
|
|
||||||
def is_empty_leaf?(leaf_val) when is_boolean(leaf_val) do
|
|
||||||
leaf_val == false
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Computes the union of two leaf values.
|
|
||||||
`typing_ctx` is included for interface consistency, but not used for boolean leaves.
|
|
||||||
"""
|
|
||||||
def union_leaves(_typing_ctx, leaf1, leaf2) when is_boolean(leaf1) and is_boolean(leaf2) do
|
|
||||||
leaf1 or leaf2
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Computes the intersection of two leaf values.
|
|
||||||
`typing_ctx` is included for interface consistency, but not used for boolean leaves.
|
|
||||||
"""
|
|
||||||
def intersection_leaves(_typing_ctx, leaf1, leaf2)
|
|
||||||
when is_boolean(leaf1) and is_boolean(leaf2) do
|
|
||||||
leaf1 and leaf2
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Computes the negation of a leaf value.
|
|
||||||
`typing_ctx` is included for interface consistency, but not used for boolean leaves.
|
|
||||||
"""
|
|
||||||
def negation_leaf(_typing_ctx, leaf) when is_boolean(leaf) do
|
|
||||||
not leaf
|
|
||||||
end
|
|
||||||
|
|
||||||
# def difference_leaves(_typing_ctx, leaf1, leaf2) when is_boolean(leaf1) and is_boolean(leaf2) do
|
|
||||||
# leaf1 and (not leaf2)
|
|
||||||
# end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Tests a leaf value to determine if it represents an empty, full, or other set.
|
|
||||||
Returns `:empty`, `:full`, or `:other`.
|
|
||||||
"""
|
|
||||||
def test_leaf_value(true), do: :full
|
|
||||||
def test_leaf_value(false), do: :empty
|
|
||||||
# Add a clause for other types if atoms could have non-boolean leaf values
|
|
||||||
# def test_leaf_value(_other), do: :other
|
|
||||||
end
|
|
||||||
@ -1,87 +0,0 @@
|
|||||||
defmodule Tilly.BDD.IntegerBoolOps do
|
|
||||||
@moduledoc """
|
|
||||||
BDD Operations module for BDDs where elements are integers and leaves are booleans.
|
|
||||||
"""
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Compares two integer elements.
|
|
||||||
Returns `:lt`, `:eq`, or `:gt`.
|
|
||||||
"""
|
|
||||||
def compare_elements(elem1, elem2) when is_integer(elem1) and is_integer(elem2) do
|
|
||||||
cond do
|
|
||||||
elem1 < elem2 -> :lt
|
|
||||||
elem1 > elem2 -> :gt
|
|
||||||
true -> :eq
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Checks if two integer elements are equal.
|
|
||||||
"""
|
|
||||||
def equal_element?(elem1, elem2) when is_integer(elem1) and is_integer(elem2) do
|
|
||||||
elem1 == elem2
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Hashes an integer element.
|
|
||||||
"""
|
|
||||||
def hash_element(elem) when is_integer(elem) do
|
|
||||||
elem
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Returns the leaf value representing emptiness (false).
|
|
||||||
"""
|
|
||||||
def empty_leaf(), do: false
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Returns the leaf value representing universality (true).
|
|
||||||
"""
|
|
||||||
def any_leaf(), do: true
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Checks if the leaf value represents emptiness.
|
|
||||||
"""
|
|
||||||
def is_empty_leaf?(leaf_val) when is_boolean(leaf_val) do
|
|
||||||
leaf_val == false
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Computes the union of two boolean leaf values.
|
|
||||||
The `_typing_ctx` is ignored for this simple ops module.
|
|
||||||
"""
|
|
||||||
def union_leaves(_typing_ctx, leaf1, leaf2) when is_boolean(leaf1) and is_boolean(leaf2) do
|
|
||||||
leaf1 or leaf2
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Computes the intersection of two boolean leaf values.
|
|
||||||
The `_typing_ctx` is ignored for this simple ops module.
|
|
||||||
"""
|
|
||||||
def intersection_leaves(_typing_ctx, leaf1, leaf2)
|
|
||||||
when is_boolean(leaf1) and is_boolean(leaf2) do
|
|
||||||
leaf1 and leaf2
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Computes the negation of a boolean leaf value.
|
|
||||||
The `_typing_ctx` is ignored for this simple ops module.
|
|
||||||
"""
|
|
||||||
def negation_leaf(_typing_ctx, leaf) when is_boolean(leaf) do
|
|
||||||
not leaf
|
|
||||||
end
|
|
||||||
|
|
||||||
# def difference_leaves(_typing_ctx, leaf1, leaf2) when is_boolean(leaf1) and is_boolean(leaf2) do
|
|
||||||
# leaf1 and (not leaf2)
|
|
||||||
# end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Tests a leaf value to determine if it represents an empty, full, or other set.
|
|
||||||
For boolean leaves with integers, this mirrors AtomBoolOps and StringBoolOps.
|
|
||||||
Returns `:empty`, `:full`, or `:other`.
|
|
||||||
"""
|
|
||||||
def test_leaf_value(true), do: :full
|
|
||||||
def test_leaf_value(false), do: :empty
|
|
||||||
# If integer BDDs could have non-boolean leaves that are not empty/full:
|
|
||||||
# def test_leaf_value(_other_leaf_value), do: :other
|
|
||||||
end
|
|
||||||
@ -1,124 +0,0 @@
|
|||||||
defmodule Tilly.BDD.Node do
|
|
||||||
@moduledoc """
|
|
||||||
Defines the structure of BDD nodes and provides basic helper functions.
|
|
||||||
|
|
||||||
BDD nodes can be one of the following Elixir terms:
|
|
||||||
- `true`: Represents the universal set BDD.
|
|
||||||
- `false`: Represents the empty set BDD.
|
|
||||||
- `{:leaf, leaf_value_id}`: Represents a leaf node.
|
|
||||||
`leaf_value_id`'s interpretation depends on the specific BDD's `ops_module`.
|
|
||||||
- `{:split, element_id, positive_child_id, ignore_child_id, negative_child_id}`:
|
|
||||||
Represents an internal decision node.
|
|
||||||
`element_id` is the value being split upon.
|
|
||||||
`positive_child_id`, `ignore_child_id`, `negative_child_id` are IDs of other BDD nodes.
|
|
||||||
"""
|
|
||||||
|
|
||||||
@typedoc "A BDD node representing the universal set."
|
|
||||||
@type true_node :: true
|
|
||||||
|
|
||||||
@typedoc "A BDD node representing the empty set."
|
|
||||||
@type false_node :: false
|
|
||||||
|
|
||||||
@typedoc "A BDD leaf node."
|
|
||||||
@type leaf_node(leaf_value) :: {:leaf, leaf_value}
|
|
||||||
|
|
||||||
@typedoc "A BDD split node."
|
|
||||||
@type split_node(element, node_id) ::
|
|
||||||
{:split, element, node_id, node_id, node_id}
|
|
||||||
|
|
||||||
@typedoc "Any valid BDD node structure."
|
|
||||||
@type t(element, leaf_value, node_id) ::
|
|
||||||
true_node()
|
|
||||||
| false_node()
|
|
||||||
| leaf_node(leaf_value)
|
|
||||||
| split_node(element, node_id)
|
|
||||||
|
|
||||||
# --- Smart Constructors (Low-Level) ---
|
|
||||||
|
|
||||||
@doc "Creates a true BDD node."
|
|
||||||
@spec mk_true() :: true_node()
|
|
||||||
def mk_true, do: true
|
|
||||||
|
|
||||||
@doc "Creates a false BDD node."
|
|
||||||
@spec mk_false() :: false_node()
|
|
||||||
def mk_false, do: false
|
|
||||||
|
|
||||||
@doc "Creates a leaf BDD node."
|
|
||||||
@spec mk_leaf(leaf_value :: any()) :: leaf_node(any())
|
|
||||||
def mk_leaf(leaf_value_id), do: {:leaf, leaf_value_id}
|
|
||||||
|
|
||||||
@doc "Creates a split BDD node."
|
|
||||||
@spec mk_split(
|
|
||||||
element_id :: any(),
|
|
||||||
positive_child_id :: any(),
|
|
||||||
ignore_child_id :: any(),
|
|
||||||
negative_child_id :: any()
|
|
||||||
) :: split_node(any(), any())
|
|
||||||
def mk_split(element_id, positive_child_id, ignore_child_id, negative_child_id) do
|
|
||||||
{:split, element_id, positive_child_id, ignore_child_id, negative_child_id}
|
|
||||||
end
|
|
||||||
|
|
||||||
# --- Predicates ---
|
|
||||||
|
|
||||||
@doc "Checks if the node is a true node."
|
|
||||||
@spec is_true?(node :: t(any(), any(), any())) :: boolean()
|
|
||||||
def is_true?(true), do: true
|
|
||||||
def is_true?(_other), do: false
|
|
||||||
|
|
||||||
@doc "Checks if the node is a false node."
|
|
||||||
@spec is_false?(node :: t(any(), any(), any())) :: boolean()
|
|
||||||
def is_false?(false), do: true
|
|
||||||
def is_false?(_other), do: false
|
|
||||||
|
|
||||||
@doc "Checks if the node is a leaf node."
|
|
||||||
@spec is_leaf?(node :: t(any(), any(), any())) :: boolean()
|
|
||||||
def is_leaf?({:leaf, _value}), do: true
|
|
||||||
def is_leaf?(_other), do: false
|
|
||||||
|
|
||||||
@doc "Checks if the node is a split node."
|
|
||||||
@spec is_split?(node :: t(any(), any(), any())) :: boolean()
|
|
||||||
def is_split?({:split, _el, _p, _i, _n}), do: true
|
|
||||||
def is_split?(_other), do: false
|
|
||||||
|
|
||||||
# --- Accessors ---
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Returns the value of a leaf node.
|
|
||||||
Raises an error if the node is not a leaf node.
|
|
||||||
"""
|
|
||||||
@spec value(leaf_node :: leaf_node(any())) :: any()
|
|
||||||
def value({:leaf, value_id}), do: value_id
|
|
||||||
def value(other), do: raise(ArgumentError, "Not a leaf node: #{inspect(other)}")
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Returns the element of a split node.
|
|
||||||
Raises an error if the node is not a split node.
|
|
||||||
"""
|
|
||||||
@spec element(split_node :: split_node(any(), any())) :: any()
|
|
||||||
def element({:split, element_id, _, _, _}), do: element_id
|
|
||||||
def element(other), do: raise(ArgumentError, "Not a split node: #{inspect(other)}")
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Returns the positive child ID of a split node.
|
|
||||||
Raises an error if the node is not a split node.
|
|
||||||
"""
|
|
||||||
@spec positive_child(split_node :: split_node(any(), any())) :: any()
|
|
||||||
def positive_child({:split, _, p_child_id, _, _}), do: p_child_id
|
|
||||||
def positive_child(other), do: raise(ArgumentError, "Not a split node: #{inspect(other)}")
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Returns the ignore child ID of a split node.
|
|
||||||
Raises an error if the node is not a split node.
|
|
||||||
"""
|
|
||||||
@spec ignore_child(split_node :: split_node(any(), any())) :: any()
|
|
||||||
def ignore_child({:split, _, _, i_child_id, _}), do: i_child_id
|
|
||||||
def ignore_child(other), do: raise(ArgumentError, "Not a split node: #{inspect(other)}")
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Returns the negative child ID of a split node.
|
|
||||||
Raises an error if the node is not a split node.
|
|
||||||
"""
|
|
||||||
@spec negative_child(split_node :: split_node(any(), any())) :: any()
|
|
||||||
def negative_child({:split, _, _, _, n_child_id}), do: n_child_id
|
|
||||||
def negative_child(other), do: raise(ArgumentError, "Not a split node: #{inspect(other)}")
|
|
||||||
end
|
|
||||||
@ -1,347 +0,0 @@
|
|||||||
defmodule Tilly.BDD.Ops do
|
|
||||||
@moduledoc """
|
|
||||||
Generic BDD algorithms and smart constructors.
|
|
||||||
These functions operate on BDD node IDs and use an `ops_module`
|
|
||||||
to dispatch to specific element/leaf operations.
|
|
||||||
"""
|
|
||||||
|
|
||||||
alias Tilly.BDD
|
|
||||||
alias Tilly.BDD.Node
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Smart constructor for leaf nodes.
|
|
||||||
Uses the `ops_module` to test if the `leaf_value` corresponds to
|
|
||||||
an empty or universal set for that module.
|
|
||||||
Returns `{new_typing_ctx, node_id}`.
|
|
||||||
"""
|
|
||||||
def leaf(typing_ctx, leaf_value, ops_module) do
|
|
||||||
case apply(ops_module, :test_leaf_value, [leaf_value]) do
|
|
||||||
:empty ->
|
|
||||||
{typing_ctx, BDD.false_node_id()}
|
|
||||||
|
|
||||||
:full ->
|
|
||||||
{typing_ctx, BDD.true_node_id()}
|
|
||||||
|
|
||||||
:other ->
|
|
||||||
logical_structure = Node.mk_leaf(leaf_value)
|
|
||||||
BDD.get_or_intern_node(typing_ctx, logical_structure, ops_module)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Smart constructor for split nodes. Applies simplification rules.
|
|
||||||
Returns `{new_typing_ctx, node_id}`.
|
|
||||||
"""
|
|
||||||
def split(typing_ctx, element, p_id, i_id, n_id, ops_module) do
|
|
||||||
# Apply simplification rules. Order can be important.
|
|
||||||
cond do
|
|
||||||
# If ignore and negative children are False, result is positive child.
|
|
||||||
BDD.is_false_node?(typing_ctx, i_id) and
|
|
||||||
BDD.is_false_node?(typing_ctx, n_id) ->
|
|
||||||
{typing_ctx, p_id}
|
|
||||||
|
|
||||||
# If ignore child is True, the whole BDD is True.
|
|
||||||
BDD.is_true_node?(typing_ctx, i_id) ->
|
|
||||||
{typing_ctx, BDD.true_node_id()}
|
|
||||||
|
|
||||||
# If positive and negative children are the same.
|
|
||||||
p_id == n_id ->
|
|
||||||
if p_id == i_id do
|
|
||||||
# All three children are identical.
|
|
||||||
{typing_ctx, p_id}
|
|
||||||
else
|
|
||||||
# Result is p_id (or n_id) unioned with i_id.
|
|
||||||
# This creates a potential mutual recursion with union_bdds
|
|
||||||
# which needs to be handled by the apply_op cache.
|
|
||||||
union_bdds(typing_ctx, p_id, i_id)
|
|
||||||
end
|
|
||||||
|
|
||||||
# TODO: Add more simplification rules from CDuce bdd.ml `split` as needed.
|
|
||||||
# e.g. if p=T, i=F, n=T -> True
|
|
||||||
# e.g. if p=F, i=F, n=T -> not(x) relative to this BDD's element universe (complex)
|
|
||||||
|
|
||||||
true ->
|
|
||||||
# No further simplification rule applied, intern the node.
|
|
||||||
logical_structure = Node.mk_split(element, p_id, i_id, n_id)
|
|
||||||
BDD.get_or_intern_node(typing_ctx, logical_structure, ops_module)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Computes the union of two BDDs.
|
|
||||||
Returns `{new_typing_ctx, result_node_id}`.
|
|
||||||
"""
|
|
||||||
def union_bdds(typing_ctx, bdd1_id, bdd2_id) do
|
|
||||||
apply_op(typing_ctx, :union, bdd1_id, bdd2_id)
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Computes the intersection of two BDDs.
|
|
||||||
Returns `{new_typing_ctx, result_node_id}`.
|
|
||||||
"""
|
|
||||||
def intersection_bdds(typing_ctx, bdd1_id, bdd2_id) do
|
|
||||||
apply_op(typing_ctx, :intersection, bdd1_id, bdd2_id)
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
|
|
||||||
Computes the negation of a BDD.
|
|
||||||
Returns `{new_typing_ctx, result_node_id}`.
|
|
||||||
"""
|
|
||||||
def negation_bdd(typing_ctx, bdd_id) do
|
|
||||||
# The second argument to apply_op is nil for unary operations like negation.
|
|
||||||
apply_op(typing_ctx, :negation, bdd_id, nil)
|
|
||||||
end
|
|
||||||
|
|
||||||
@doc """
Computes the difference of two BDDs (bdd1 - bdd2).

Returns `{new_typing_ctx, result_node_id}`.

Implemented as `bdd1 INTERSECTION (NEGATION bdd2)`.
"""
def difference_bdd(typing_ctx, bdd1_id, bdd2_id) do
  {ctx_after_neg, complement_id} = negation_bdd(typing_ctx, bdd2_id)
  intersection_bdds(ctx_after_neg, bdd1_id, complement_id)
end
|
|
||||||
|
|
||||||
# Uncached union entry point (called via apply_op, which also canonicalizes
# the cache key for this commutative operation).
defp do_union_bdds(typing_ctx, bdd1_id, bdd2_id) do
  # Terminal shortcuts before the structural walk.
  cond do
    # Union is idempotent: X ∪ X = X.
    bdd1_id == bdd2_id ->
      {typing_ctx, bdd1_id}

    # Anything unioned with the universal set stays universal.
    BDD.is_true_node?(typing_ctx, bdd1_id) or BDD.is_true_node?(typing_ctx, bdd2_id) ->
      {typing_ctx, BDD.true_node_id()}

    # The empty set is the identity element of union.
    BDD.is_false_node?(typing_ctx, bdd1_id) ->
      {typing_ctx, bdd2_id}

    BDD.is_false_node?(typing_ctx, bdd2_id) ->
      {typing_ctx, bdd1_id}

    true ->
      perform_union(typing_ctx, bdd1_id, bdd2_id)
  end
end
|
|
||||||
|
|
||||||
# Structural union of two non-terminal BDDs, following the CDuce bdd.ml
# recursion. Threads typing_ctx through every recursive call so that node
# interning and the ops cache stay consistent.
# Returns {new_typing_ctx, result_node_id}.
defp perform_union(typing_ctx, bdd1_id, bdd2_id) do
  %{structure: s1, ops_module: ops_m1} = BDD.get_node_data(typing_ctx, bdd1_id)
  %{structure: s2, ops_module: ops_m2} = BDD.get_node_data(typing_ctx, bdd2_id)

  # For now, assume ops_modules must match for simplicity.
  # Production systems might need more complex logic or type errors here.
  if ops_m1 != ops_m2 do
    raise ArgumentError,
          "Cannot union BDDs with different ops_modules: #{inspect(ops_m1)} and #{inspect(ops_m2)}"
  end

  ops_m = ops_m1

  case {s1, s2} do
    # Both are leaves: delegate to the ops_module's leaf-level union.
    {{:leaf, v1}, {:leaf, v2}} ->
      new_leaf_val = apply(ops_m, :union_leaves, [typing_ctx, v1, v2])
      leaf(typing_ctx, new_leaf_val, ops_m)

    # s1 is split, s2 is leaf: the leaf is absorbed into the "ignore" child.
    {{:split, x1, p1_id, i1_id, n1_id}, {:leaf, _v2}} ->
      # CDuce: split x1 p1 (i1 ++ b) n1
      {ctx, new_i1_id} = union_bdds(typing_ctx, i1_id, bdd2_id)
      split(ctx, x1, p1_id, new_i1_id, n1_id, ops_m)

    # s1 is leaf, s2 is split
    {{:leaf, _v1}, {:split, x2, p2_id, i2_id, n2_id}} ->
      # CDuce: split x2 p2 (i2 ++ a) n2 (symmetric to above)
      {ctx, new_i2_id} = union_bdds(typing_ctx, i2_id, bdd1_id)
      split(ctx, x2, p2_id, new_i2_id, n2_id, ops_m)

    # Both are splits: branch on the total order of the split elements so
    # the resulting BDD stays ordered/canonical.
    {{:split, x1, p1_id, i1_id, n1_id}, {:split, x2, p2_id, i2_id, n2_id}} ->
      # Compare elements using the ops_module
      comp_result = apply(ops_m, :compare_elements, [x1, x2])

      cond do
        comp_result == :eq ->
          # Elements are equal, merge children pairwise.
          {ctx0, new_p_id} = union_bdds(typing_ctx, p1_id, p2_id)
          {ctx1, new_i_id} = union_bdds(ctx0, i1_id, i2_id)
          {ctx2, new_n_id} = union_bdds(ctx1, n1_id, n2_id)
          split(ctx2, x1, new_p_id, new_i_id, new_n_id, ops_m)

        comp_result == :lt ->
          # x1 < x2: keep x1 on top, push the whole of bdd2 into x1's
          # ignore child.
          # CDuce: split x1 p1 (i1 ++ b) n1
          {ctx, new_i1_id} = union_bdds(typing_ctx, i1_id, bdd2_id)
          split(ctx, x1, p1_id, new_i1_id, n1_id, ops_m)

        comp_result == :gt ->
          # x1 > x2: symmetric case.
          # CDuce: split x2 p2 (i2 ++ a) n2
          {ctx, new_i2_id} = union_bdds(typing_ctx, i2_id, bdd1_id)
          split(ctx, x2, p2_id, new_i2_id, n2_id, ops_m)
      end
  end
end
|
|
||||||
|
|
||||||
# Uncached intersection entry point (called via apply_op).
# Returns {new_typing_ctx, result_node_id}.
defp do_intersection_bdds(typing_ctx, bdd1_id, bdd2_id) do
  # Canonical order handled by apply_op key generation.

  # Fast path for disjoint singleton BDDs.
  # NOTE(review): the repeated pattern variables only require both splits to
  # share the *same* positive child `t` and the same ignore/negative child
  # `f`; nothing checks that `t` is the true node or `f` the false node.
  # Returning the false node here is only sound if two splits on distinct
  # elements of the same ops_module can never overlap (e.g. distinct atom
  # literals) — TODO confirm this invariant holds for all ops modules
  # (pairs/records in particular).
  case {BDD.get_node_data(typing_ctx, bdd1_id), BDD.get_node_data(typing_ctx, bdd2_id)} do
    {%{structure: {:split, x1, t, f, f}, ops_module: m},
     %{structure: {:split, x2, t, f, f}, ops_module: m}}
    when x1 != x2 ->
      {typing_ctx, BDD.false_node_id()}

    _ ->
      # 1. Handle terminal cases
      cond do
        # X ∩ X = X.
        bdd1_id == bdd2_id -> {typing_ctx, bdd1_id}
        # The empty set annihilates intersection.
        BDD.is_false_node?(typing_ctx, bdd1_id) -> {typing_ctx, BDD.false_node_id()}
        BDD.is_false_node?(typing_ctx, bdd2_id) -> {typing_ctx, BDD.false_node_id()}
        # The universal set is the identity element of intersection.
        BDD.is_true_node?(typing_ctx, bdd1_id) -> {typing_ctx, bdd2_id}
        BDD.is_true_node?(typing_ctx, bdd2_id) -> {typing_ctx, bdd1_id}
        true -> perform_intersection(typing_ctx, bdd1_id, bdd2_id)
      end
  end
end
|
|
||||||
|
|
||||||
# Structural intersection of two non-terminal BDDs, following the CDuce
# bdd.ml recursion. The context is threaded through every intermediate
# union/intersection so node interning and caches remain consistent; the
# ctx0..ctx8 numbering makes the required evaluation order explicit.
# Returns {new_typing_ctx, result_node_id}.
defp perform_intersection(typing_ctx, bdd1_id, bdd2_id) do
  %{structure: s1, ops_module: ops_m1} = BDD.get_node_data(typing_ctx, bdd1_id)
  %{structure: s2, ops_module: ops_m2} = BDD.get_node_data(typing_ctx, bdd2_id)

  # Mixed element universes are rejected outright (same policy as union).
  if ops_m1 != ops_m2 do
    raise ArgumentError,
          "Cannot intersect BDDs with different ops_modules: #{inspect(ops_m1)} and #{inspect(ops_m2)}"
  end

  ops_m = ops_m1

  case {s1, s2} do
    # Both are leaves: delegate to the ops_module's leaf-level intersection.
    {{:leaf, v1}, {:leaf, v2}} ->
      new_leaf_val = apply(ops_m, :intersection_leaves, [typing_ctx, v1, v2])
      leaf(typing_ctx, new_leaf_val, ops_m)

    # s1 is split, s2 is leaf: intersection distributes over all three
    # children of the split.
    {{:split, x1, p1_id, i1_id, n1_id}, {:leaf, _v2}} ->
      {ctx0, new_p1_id} = intersection_bdds(typing_ctx, p1_id, bdd2_id)
      {ctx1, new_i1_id} = intersection_bdds(ctx0, i1_id, bdd2_id)
      {ctx2, new_n1_id} = intersection_bdds(ctx1, n1_id, bdd2_id)
      split(ctx2, x1, new_p1_id, new_i1_id, new_n1_id, ops_m)

    # s1 is leaf, s2 is split (symmetric to the case above)
    {{:leaf, _v1}, {:split, x2, p2_id, i2_id, n2_id}} ->
      {ctx0, new_p2_id} = intersection_bdds(typing_ctx, bdd1_id, p2_id)
      {ctx1, new_i2_id} = intersection_bdds(ctx0, bdd1_id, i2_id)
      {ctx2, new_n2_id} = intersection_bdds(ctx1, bdd1_id, n2_id)
      split(ctx2, x2, new_p2_id, new_i2_id, new_n2_id, ops_m)

    # Both are splits: branch on the element ordering.
    {{:split, x1, p1_id, i1_id, n1_id}, {:split, x2, p2_id, i2_id, n2_id}} ->
      comp_result = apply(ops_m, :compare_elements, [x1, x2])

      cond do
        comp_result == :eq ->
          # Same top element; apply the CDuce product formula
          # (** = intersection, ++ = union):
          # CDuce: split x1 ((p1**(p2++i2))++(p2**i1)) (i1**i2) ((n1**(n2++i2))++(n2**i1))
          {ctx0, p2_u_i2} = union_bdds(typing_ctx, p2_id, i2_id)
          {ctx1, n2_u_i2} = union_bdds(ctx0, n2_id, i2_id)

          # New positive child: (p1 ** (p2 ++ i2)) ++ (p2 ** i1)
          {ctx2, p1_i_p2ui2} = intersection_bdds(ctx1, p1_id, p2_u_i2)
          {ctx3, p2_i_i1} = intersection_bdds(ctx2, p2_id, i1_id)
          {ctx4, new_p_id} = union_bdds(ctx3, p1_i_p2ui2, p2_i_i1)

          # New ignore child: i1 ** i2
          {ctx5, new_i_id} = intersection_bdds(ctx4, i1_id, i2_id)

          # New negative child: (n1 ** (n2 ++ i2)) ++ (n2 ** i1)
          {ctx6, n1_i_n2ui2} = intersection_bdds(ctx5, n1_id, n2_u_i2)
          {ctx7, n2_i_i1} = intersection_bdds(ctx6, n2_id, i1_id)
          {ctx8, new_n_id} = union_bdds(ctx7, n1_i_n2ui2, n2_i_i1)

          split(ctx8, x1, new_p_id, new_i_id, new_n_id, ops_m)

        # x1 < x2: keep x1 on top and intersect each child with all of bdd2.
        comp_result == :lt ->
          # CDuce: split x1 (p1 ** b) (i1 ** b) (n1 ** b) where b is bdd2
          {ctx0, new_p1_id} = intersection_bdds(typing_ctx, p1_id, bdd2_id)
          {ctx1, new_i1_id} = intersection_bdds(ctx0, i1_id, bdd2_id)
          {ctx2, new_n1_id} = intersection_bdds(ctx1, n1_id, bdd2_id)
          split(ctx2, x1, new_p1_id, new_i1_id, new_n1_id, ops_m)

        # x1 > x2: symmetric case.
        comp_result == :gt ->
          # CDuce: split x2 (a ** p2) (a ** i2) (a ** n2) where a is bdd1
          {ctx0, new_p2_id} = intersection_bdds(typing_ctx, bdd1_id, p2_id)
          {ctx1, new_i2_id} = intersection_bdds(ctx0, bdd1_id, i2_id)
          {ctx2, new_n2_id} = intersection_bdds(ctx1, bdd1_id, n2_id)
          split(ctx2, x2, new_p2_id, new_i2_id, new_n2_id, ops_m)
      end
  end
end
|
|
||||||
|
|
||||||
# Uncached negation entry point (called via apply_op).
# The two terminal nodes simply swap; everything else walks the structure.
defp do_negation_bdd(typing_ctx, bdd_id) do
  is_universal = BDD.is_true_node?(typing_ctx, bdd_id)
  is_empty = BDD.is_false_node?(typing_ctx, bdd_id)

  cond do
    is_universal -> {typing_ctx, BDD.false_node_id()}
    is_empty -> {typing_ctx, BDD.true_node_id()}
    true -> perform_negation(typing_ctx, bdd_id)
  end
end
|
|
||||||
|
|
||||||
# Structural negation of a non-terminal BDD, following the CDuce bdd.ml
# recursion. Mutually recursive with negation_bdd/union_bdds/
# intersection_bdds; apply_op's cache bounds the recursion.
# Returns {new_typing_ctx, result_node_id}.
defp perform_negation(typing_ctx, bdd_id) do
  %{structure: s, ops_module: ops_m} = BDD.get_node_data(typing_ctx, bdd_id)

  case s do
    # Leaf: delegate to the ops_module's leaf-level negation.
    {:leaf, v} ->
      neg_leaf_val = apply(ops_m, :negation_leaf, [typing_ctx, v])
      leaf(typing_ctx, neg_leaf_val, ops_m)

    # Split: (~~ = negation, ** = intersection, ++ = union)
    {:split, x, p_id, i_id, n_id} ->
      # CDuce: ~~i ** split x (~~p) (~~(p++n)) (~~n)
      {ctx0, neg_i_id} = negation_bdd(typing_ctx, i_id)
      {ctx1, neg_p_id} = negation_bdd(ctx0, p_id)
      {ctx2, p_u_n_id} = union_bdds(ctx1, p_id, n_id)
      {ctx3, neg_p_u_n_id} = negation_bdd(ctx2, p_u_n_id)
      {ctx4, neg_n_id} = negation_bdd(ctx3, n_id)
      {ctx5, split_part_id} = split(ctx4, x, neg_p_id, neg_p_u_n_id, neg_n_id, ops_m)
      # Finally intersect the negated ignore branch with the rebuilt split.
      intersection_bdds(ctx5, neg_i_id, split_part_id)
  end
end
|
|
||||||
|
|
||||||
# --- Caching Wrapper for BDD Operations ---

# Memoizes BDD operations in typing_ctx.bdd_store.ops_cache, keyed by a
# canonical {op, id1[, id2]} tuple (see make_cache_key/3). Because node ids
# are canonical (hash-consed), a cache hit is always safe to reuse.
# Returns {new_typing_ctx, result_node_id}.
defp apply_op(typing_ctx, op_key, bdd1_id, bdd2_id) do
  cache_key = make_cache_key(op_key, bdd1_id, bdd2_id)
  bdd_store = Map.get(typing_ctx, :bdd_store)

  case Map.get(bdd_store.ops_cache, cache_key) do
    nil ->
      # Not in cache, compute it
      {new_typing_ctx, result_id} =
        case op_key do
          :union -> do_union_bdds(typing_ctx, bdd1_id, bdd2_id)
          :intersection -> do_intersection_bdds(typing_ctx, bdd1_id, bdd2_id)
          # bdd2_id is nil here
          :negation -> do_negation_bdd(typing_ctx, bdd1_id)
          _ -> raise "Unsupported op_key: #{op_key}"
        end

      # Store in cache
      # IMPORTANT: Use new_typing_ctx (from the operation) to get the potentially updated bdd_store;
      # the recursive computation may have interned new nodes and cached sub-results.
      current_bdd_store_after_op = Map.get(new_typing_ctx, :bdd_store)
      new_ops_cache = Map.put(current_bdd_store_after_op.ops_cache, cache_key, result_id)
      final_bdd_store_with_cache = %{current_bdd_store_after_op | ops_cache: new_ops_cache}
      # And put this updated bdd_store back into new_typing_ctx
      final_typing_ctx_with_cache =
        Map.put(new_typing_ctx, :bdd_store, final_bdd_store_with_cache)

      {final_typing_ctx_with_cache, result_id}

    cached_result_id ->
      # Cache hit: the context is returned unchanged.
      {typing_ctx, cached_result_id}
  end
end
|
|
||||||
|
|
||||||
# Negation is unary; the unused second operand slot must be nil.
defp make_cache_key(:negation, bdd_id, nil), do: {:negation, bdd_id}

# Union and intersection are commutative, so normalize the id order: the
# same pair hits the same cache entry regardless of argument order.
defp make_cache_key(op_key, id1, id2) when op_key in [:union, :intersection] do
  {op_key, min(id1, id2), max(id1, id2)}
end

# Any other (non-commutative) operation keeps its argument order verbatim.
defp make_cache_key(op_key, id1, id2), do: {op_key, id1, id2}
|
|
||||||
end
|
|
||||||
@ -1,87 +0,0 @@
|
|||||||
defmodule Tilly.BDD.StringBoolOps do
  @moduledoc """
  BDD operations module for sets of strings.

  Elements are strings, and leaf values are booleans.
  """

  @doc """
  Compares two strings.

  Returns `:lt`, `:eq`, or `:gt`.
  """
  def compare_elements(elem1, elem2) when is_binary(elem1) and is_binary(elem2) do
    cond do
      elem1 == elem2 -> :eq
      elem1 < elem2 -> :lt
      true -> :gt
    end
  end

  @doc """
  Checks if two strings are equal.
  """
  def equal_element?(elem1, elem2) when is_binary(elem1) and is_binary(elem2),
    do: elem1 == elem2

  @doc """
  Hashes a string.
  """
  def hash_element(elem) when is_binary(elem) do
    # :erlang.phash2/1 gives a stable term hash, suitable for interning keys.
    :erlang.phash2(elem)
  end

  @doc """
  The leaf value representing an empty set of strings (false).
  """
  def empty_leaf, do: false

  @doc """
  The leaf value representing the universal set of strings (true).
  """
  def any_leaf, do: true

  @doc """
  Checks if a leaf value represents an empty set.
  """
  def is_empty_leaf?(leaf_val) when is_boolean(leaf_val), do: leaf_val == false

  @doc """
  Computes the union of two leaf values.

  `typing_ctx` is included for interface consistency, but not used for boolean leaves.
  """
  def union_leaves(_typing_ctx, leaf1, leaf2) when is_boolean(leaf1) and is_boolean(leaf2),
    do: leaf1 or leaf2

  @doc """
  Computes the intersection of two leaf values.

  `typing_ctx` is included for interface consistency, but not used for boolean leaves.
  """
  def intersection_leaves(_typing_ctx, leaf1, leaf2)
      when is_boolean(leaf1) and is_boolean(leaf2),
      do: leaf1 and leaf2

  @doc """
  Computes the negation of a leaf value.

  `typing_ctx` is included for interface consistency, but not used for boolean leaves.
  """
  def negation_leaf(_typing_ctx, leaf) when is_boolean(leaf), do: not leaf

  # Difference is derivable as `leaf1 and not leaf2`; left unimplemented
  # until a caller needs it.
  # def difference_leaves(_typing_ctx, leaf1, leaf2) when is_boolean(leaf1) and is_boolean(leaf2) do
  #   leaf1 and (not leaf2)
  # end

  @doc """
  Tests a leaf value to determine if it represents an empty, full, or other set.

  Returns `:empty`, `:full`, or `:other`.
  """
  def test_leaf_value(true), do: :full
  def test_leaf_value(false), do: :empty
  # Boolean leaves are only ever true/false, so no :other clause is needed.
  # def test_leaf_value(_other), do: :other
end
|
|
||||||
@ -1,57 +0,0 @@
|
|||||||
defmodule Tilly.Type do
  @moduledoc """
  Defines the structure of a Type Descriptor (`Descr`) and provides
  helper functions for creating fundamental type descriptors.

  A Type Descriptor is a map representing a type. Each field in the map
  corresponds to a basic kind of type component (e.g., atoms, integers, pairs)
  and holds a BDD node ID. These BDDs represent the set of values
  allowed for that particular component of the type.
  """

  alias Tilly.BDD

  # Builds a Descr where every value-kind field holds `value_id` and the
  # absent marker holds `absent_id`. Centralizing the field list keeps
  # empty_descr/1 and any_descr/1 from drifting apart field-by-field.
  defp uniform_descr(value_id, absent_id) do
    %{
      atoms_bdd_id: value_id,
      integers_bdd_id: value_id,
      strings_bdd_id: value_id,
      pairs_bdd_id: value_id,
      records_bdd_id: value_id,
      functions_bdd_id: value_id,
      absent_marker_bdd_id: absent_id
      # Add other kinds as needed, e.g., for abstract types
    }
  end

  @doc """
  Returns a `Descr` map representing the empty type (Nothing).
  All BDD IDs in this `Descr` point to the canonical `false` BDD node.
  The `typing_ctx` is passed for consistency but not modified by this function.
  """
  def empty_descr(_typing_ctx) do
    false_id = BDD.false_node_id()
    uniform_descr(false_id, false_id)
  end

  @doc """
  Returns a `Descr` map representing the universal type (Any).
  All BDD IDs in this `Descr` point to the canonical `true` BDD node.
  The `typing_ctx` is passed for consistency but not modified by this function.
  """
  def any_descr(_typing_ctx) do
    # For 'Any', absence is typically not included unless explicitly modeled.
    # If 'Any' should include the possibility of absence, the absent marker
    # would be the true node. For now 'Any' means any *value*, so absence
    # is false. This can be refined based on the desired semantics of 'Any'.
    # CDuce 'Any' does not include 'Absent'.
    uniform_descr(BDD.true_node_id(), BDD.false_node_id())
  end
end
|
|
||||||
@ -1,305 +0,0 @@
|
|||||||
defmodule Tilly.Type.Ops do
  @moduledoc """
  Implements set-theoretic operations on Type Descriptors (`Descr` maps)
  and provides helper functions for constructing specific types.
  Operations work with interned `Descr` IDs.
  """

  alias Tilly.BDD
  alias Tilly.Type
  alias Tilly.Type.Store

  # Defines the fields in a Descr map that hold BDD IDs.
  # Order can be relevant if specific iteration order is ever needed, but
  # for field-wise ops it's not.
  defp descr_fields do
    [
      :atoms_bdd_id,
      :integers_bdd_id,
      :strings_bdd_id,
      :pairs_bdd_id,
      :records_bdd_id,
      :functions_bdd_id,
      :absent_marker_bdd_id
    ]
  end

  # --- Core Set Operations ---

  @doc """
  Computes the union of two types represented by their `Descr` IDs.
  Returns `{new_typing_ctx, result_descr_id}`.
  """
  def union_types(typing_ctx, descr1_id, descr2_id) do
    apply_type_op(typing_ctx, :union, descr1_id, descr2_id)
  end

  @doc """
  Computes the intersection of two types represented by their `Descr` IDs.
  Returns `{new_typing_ctx, result_descr_id}`.
  """
  def intersection_types(typing_ctx, descr1_id, descr2_id) do
    apply_type_op(typing_ctx, :intersection, descr1_id, descr2_id)
  end

  @doc """
  Computes the negation of a type represented by its `Descr` ID.
  Returns `{new_typing_ctx, result_descr_id}`.
  """
  def negation_type(typing_ctx, descr_id) do
    apply_type_op(typing_ctx, :negation, descr_id, nil)
  end

  # Field-wise binary combinator shared by union and intersection: applies
  # `bdd_op` (fn ctx, bdd1_id, bdd2_id -> {ctx, result_id} end) to each
  # corresponding pair of field BDDs, threading the context, then interns
  # the resulting Descr. Extracted to remove the copy-paste duplication
  # between the union and intersection implementations.
  defp combine_descr_fields(typing_ctx, descr1_id, descr2_id, bdd_op) do
    descr1 = Store.get_descr_by_id(typing_ctx, descr1_id)
    descr2 = Store.get_descr_by_id(typing_ctx, descr2_id)

    {final_ctx, result_fields_map} =
      Enum.reduce(descr_fields(), {typing_ctx, %{}}, fn field, {current_ctx, acc_fields} ->
        {new_ctx, result_bdd_id} =
          bdd_op.(current_ctx, Map.get(descr1, field), Map.get(descr2, field))

        {new_ctx, Map.put(acc_fields, field, result_bdd_id)}
      end)

    Store.get_or_intern_descr(final_ctx, result_fields_map)
  end

  defp do_union_types(typing_ctx, descr1_id, descr2_id) do
    combine_descr_fields(typing_ctx, descr1_id, descr2_id, &BDD.Ops.union_bdds/3)
  end

  defp do_intersection_types(typing_ctx, descr1_id, descr2_id) do
    combine_descr_fields(typing_ctx, descr1_id, descr2_id, &BDD.Ops.intersection_bdds/3)
  end

  defp do_negation_type(typing_ctx, descr_id) do
    descr = Store.get_descr_by_id(typing_ctx, descr_id)

    {final_ctx, result_fields_map} =
      Enum.reduce(descr_fields(), {typing_ctx, %{}}, fn field, {current_ctx, acc_fields} ->
        bdd_id = Map.get(descr, field)

        {ctx_after_neg, result_bdd_id} =
          if field == :absent_marker_bdd_id do
            # Negating a value type never introduces "absent".
            {current_ctx, BDD.false_node_id()}
          else
            BDD.Ops.negation_bdd(current_ctx, bdd_id)
          end

        {ctx_after_neg, Map.put(acc_fields, field, result_bdd_id)}
      end)

    # The reduce threads current_ctx, which accumulates BDD op cache updates;
    # the primary context update happens in Store.get_or_intern_descr.
    Store.get_or_intern_descr(final_ctx, result_fields_map)
  end

  # --- Caching Wrapper for Type Operations ---

  # Memoizes type-level operations in typing_ctx.type_store.ops_cache,
  # keyed by a canonical {op, id1[, id2]} tuple.
  defp apply_type_op(typing_ctx, op_key, descr1_id, descr2_id) do
    cache_key = make_type_op_cache_key(op_key, descr1_id, descr2_id)
    type_store = Map.get(typing_ctx, :type_store)

    case Map.get(type_store.ops_cache, cache_key) do
      nil ->
        # Not in cache, compute it
        {new_typing_ctx, result_id} =
          case op_key do
            :union -> do_union_types(typing_ctx, descr1_id, descr2_id)
            :intersection -> do_intersection_types(typing_ctx, descr1_id, descr2_id)
            # descr2_id is nil for the unary negation
            :negation -> do_negation_type(typing_ctx, descr1_id)
            _ -> raise "Unsupported type op_key: #{op_key}"
          end

        # Store in cache (important: use new_typing_ctx to get the
        # potentially updated type_store from the operation itself)
        current_type_store_after_op = Map.get(new_typing_ctx, :type_store)
        new_ops_cache = Map.put(current_type_store_after_op.ops_cache, cache_key, result_id)
        final_type_store_with_cache = %{current_type_store_after_op | ops_cache: new_ops_cache}

        # And put this updated type_store back into new_typing_ctx
        final_typing_ctx_with_cache =
          Map.put(new_typing_ctx, :type_store, final_type_store_with_cache)

        {final_typing_ctx_with_cache, result_id}

      cached_result_id ->
        {typing_ctx, cached_result_id}
    end
  end

  defp make_type_op_cache_key(:negation, descr_id, nil), do: {:negation, descr_id}

  # Canonical operand order for commutative binary operations.
  defp make_type_op_cache_key(op_key, id1, id2) when op_key in [:union, :intersection] do
    if id1 <= id2, do: {op_key, id1, id2}, else: {op_key, id2, id1}
  end

  defp make_type_op_cache_key(op_key, id1, id2), do: {op_key, id1, id2}

  # --- Utility Functions ---

  @doc """
  Checks if a type represented by its `Descr` ID is the empty type (Nothing).
  Does not modify `typing_ctx`.
  """
  def is_empty_type?(typing_ctx, descr_id) do
    descr_map = Store.get_descr_by_id(typing_ctx, descr_id)

    Enum.all?(descr_fields(), fn field ->
      BDD.is_false_node?(typing_ctx, Map.get(descr_map, field))
    end)
  end

  # --- Construction Helper Functions ---

  # Builds a Descr map in which every field is the false BDD except `field`,
  # which holds `bdd_id`. Derived from descr_fields/0 so the literal/primitive
  # builders below cannot drift from the canonical field list.
  defp single_field_descr(field, bdd_id) do
    descr_fields()
    |> Map.new(fn f -> {f, BDD.false_node_id()} end)
    |> Map.put(field, bdd_id)
  end

  # Interns the BDD Split(value, True, False, False) — the singleton set
  # containing exactly `value` — under the given ops_module.
  defp singleton_bdd(typing_ctx, value, ops_module) do
    BDD.Ops.split(
      typing_ctx,
      value,
      BDD.true_node_id(),
      BDD.false_node_id(),
      BDD.false_node_id(),
      ops_module
    )
  end

  @doc """
  Gets the `Descr` ID for the canonical 'Nothing' type.
  """
  def get_type_nothing(typing_ctx) do
    Store.get_or_intern_descr(typing_ctx, Type.empty_descr(typing_ctx))
  end

  @doc """
  Gets the `Descr` ID for the canonical 'Any' type.
  """
  def get_type_any(typing_ctx) do
    Store.get_or_intern_descr(typing_ctx, Type.any_descr(typing_ctx))
  end

  @doc """
  Creates a type `Descr` ID representing a single atom literal.
  """
  def create_atom_literal_type(typing_ctx, atom_value) when is_atom(atom_value) do
    # The ops_module Tilly.BDD.AtomBoolOps is crucial here.
    {ctx1, atom_bdd_id} = singleton_bdd(typing_ctx, atom_value, Tilly.BDD.AtomBoolOps)
    Store.get_or_intern_descr(ctx1, single_field_descr(:atoms_bdd_id, atom_bdd_id))
  end

  @doc """
  Creates a type `Descr` ID representing a single integer literal.
  """
  def create_integer_literal_type(typing_ctx, integer_value) when is_integer(integer_value) do
    {ctx1, integer_bdd_id} = singleton_bdd(typing_ctx, integer_value, Tilly.BDD.IntegerBoolOps)
    Store.get_or_intern_descr(ctx1, single_field_descr(:integers_bdd_id, integer_bdd_id))
  end

  @doc """
  Creates a type `Descr` ID representing a single string literal.
  """
  def create_string_literal_type(typing_ctx, string_value) when is_binary(string_value) do
    {ctx1, string_bdd_id} = singleton_bdd(typing_ctx, string_value, Tilly.BDD.StringBoolOps)
    Store.get_or_intern_descr(ctx1, single_field_descr(:strings_bdd_id, string_bdd_id))
  end

  @doc """
  Gets the `Descr` ID for the type representing all atoms.
  """
  def get_primitive_type_any_atom(typing_ctx) do
    # For a BDD representing "all atoms", its structure is simply True.
    # BDD.true_node_id() is universal; BDD.Ops functions fetch the
    # ops_module from their operands, so the universal true/false nodes
    # work here without interning an atom-ops-specific node.
    Store.get_or_intern_descr(
      typing_ctx,
      single_field_descr(:atoms_bdd_id, BDD.true_node_id())
    )
  end

  @doc """
  Gets the `Descr` ID for the type representing all integers.
  """
  def get_primitive_type_any_integer(typing_ctx) do
    Store.get_or_intern_descr(
      typing_ctx,
      single_field_descr(:integers_bdd_id, BDD.true_node_id())
    )
  end

  @doc """
  Gets the `Descr` ID for the type representing all strings.
  """
  def get_primitive_type_any_string(typing_ctx) do
    Store.get_or_intern_descr(
      typing_ctx,
      single_field_descr(:strings_bdd_id, BDD.true_node_id())
    )
  end
end
|
|
||||||
@ -1,79 +0,0 @@
|
|||||||
defmodule Tilly.Type.Store do
  @moduledoc """
  Manages the interning (hash-consing) of Type Descriptor maps (`Descr` maps).
  Ensures that for any unique `Descr` map, there is one canonical integer ID.
  The type store is expected to be part of a `typing_ctx` map under the key `:type_store`.
  """

  @initial_next_descr_id 0

  @doc """
  Initializes the type store within the typing context.
  """
  def init_type_store(typing_ctx) when is_map(typing_ctx) do
    Map.put(typing_ctx, :type_store, %{
      descrs_by_structure: %{},
      structures_by_id: %{},
      next_descr_id: @initial_next_descr_id,
      # Cache for type operations {op_key, descr_id1, descr_id2} -> result_descr_id
      ops_cache: %{}
    })
  end

  @doc """
  Gets an existing Type Descriptor ID or interns a new one if it's not already in the store.

  All BDD IDs within the `descr_map` must already be canonical integer IDs.

  Returns a tuple `{new_typing_ctx, descr_id}`.
  The `typing_ctx` is updated if a new `Descr` is interned.
  """
  def get_or_intern_descr(typing_ctx, descr_map) do
    type_store = Map.get(typing_ctx, :type_store)

    if !type_store do
      raise ArgumentError, "Type store not initialized in typing_ctx. Call init_type_store first."
    end

    # The descr_map itself is the interning key; lookup is by structural
    # equality, which assumes the contained BDD ids are already canonical.
    case Map.get(type_store.descrs_by_structure, descr_map) do
      nil -> intern_new_descr(typing_ctx, type_store, descr_map)
      existing_descr_id -> {typing_ctx, existing_descr_id}
    end
  end

  # Assigns the next fresh id to descr_map, records both lookup directions,
  # and writes the updated store back into the context.
  defp intern_new_descr(typing_ctx, type_store, descr_map) do
    descr_id = type_store.next_descr_id

    updated_store = %{
      type_store
      | descrs_by_structure: Map.put(type_store.descrs_by_structure, descr_map, descr_id),
        structures_by_id: Map.put(type_store.structures_by_id, descr_id, descr_map),
        next_descr_id: descr_id + 1
    }

    {Map.put(typing_ctx, :type_store, updated_store), descr_id}
  end

  @doc """
  Retrieves the `Descr` map from the type store given its ID.
  Returns the `Descr` map or `nil` if not found.
  """
  def get_descr_by_id(typing_ctx, descr_id) do
    case typing_ctx do
      %{type_store: %{structures_by_id: structures_by_id}} ->
        Map.get(structures_by_id, descr_id)

      _ ->
        nil
    end
  end
end
|
|
||||||
1
mix.exs
1
mix.exs
@ -26,6 +26,7 @@ defmodule Til.MixProject do
|
|||||||
[
|
[
|
||||||
# {:dep_from_hexpm, "~> 0.3.0"},
|
# {:dep_from_hexpm, "~> 0.3.0"},
|
||||||
# {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"}
|
# {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"}
|
||||||
|
{:ex_unit_summary, "~> 0.1.0", only: [:dev, :test]}
|
||||||
]
|
]
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
3
mix.lock
Normal file
3
mix.lock
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
%{
|
||||||
|
"ex_unit_summary": {:hex, :ex_unit_summary, "0.1.0", "7b0352afc5e6a933c805df0a539b66b392ac12ba74d8b208db7d83f77cb57049", [:mix], [], "hexpm", "8c87d0deade3657102902251d2ec60b5b94560004ce0e2c2fa5b466232716bd6"},
|
||||||
|
}
|
||||||
@ -151,7 +151,8 @@ defmodule Til.TestHelpers do
|
|||||||
{original_key, deep_strip_id(original_value, nodes_map)}
|
{original_key, deep_strip_id(original_value, nodes_map)}
|
||||||
|
|
||||||
is_list(original_value) ->
|
is_list(original_value) ->
|
||||||
{original_key, deep_strip_id(original_value, nodes_map)} # Handles lists of type defs
|
# Handles lists of type defs
|
||||||
|
{original_key, deep_strip_id(original_value, nodes_map)}
|
||||||
|
|
||||||
true ->
|
true ->
|
||||||
{original_key, original_value}
|
{original_key, original_value}
|
||||||
@ -165,7 +166,8 @@ defmodule Til.TestHelpers do
|
|||||||
# Recursively call on elements for lists of type definitions
|
# Recursively call on elements for lists of type definitions
|
||||||
Enum.map(type_definition, &deep_strip_id(&1, nodes_map))
|
Enum.map(type_definition, &deep_strip_id(&1, nodes_map))
|
||||||
|
|
||||||
true -> # Literals, atoms, numbers, nil, etc. (leaf nodes in the type structure)
|
# Literals, atoms, numbers, nil, etc. (leaf nodes in the type structure)
|
||||||
|
true ->
|
||||||
type_definition
|
type_definition
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
@ -1 +1,6 @@
|
|||||||
ExUnit.start()
|
ExUnit.start()
|
||||||
|
|
||||||
|
ExUnitSummary.start(:normal, %ExUnitSummary.Config{filter_results: :failed, print_delay: 100})
|
||||||
|
|
||||||
|
# Add ExUnitSummary.Formatter to list of ExUnit's formatters.
|
||||||
|
ExUnit.configure(formatters: [ExUnit.CLIFormatter, ExUnitSummary.Formatter])
|
||||||
|
|||||||
@ -1,8 +1,434 @@
|
|||||||
defmodule TilTest do
|
defmodule TddSystemTest do
|
||||||
use ExUnit.Case
|
# Most tests mutate Tdd.Store, so they cannot run concurrently.
|
||||||
doctest Til
|
use ExUnit.Case, async: false
|
||||||
|
|
||||||
test "greets the world" do
|
alias Tdd.TypeSpec
|
||||||
assert Til.hello() == :world
|
alias Tdd.Store
|
||||||
|
alias Tdd.Variable
|
||||||
|
alias Tdd.Compiler
|
||||||
|
alias Tdd.Consistency.Engine
|
||||||
|
alias Tdd.Algo
|
||||||
|
|
||||||
|
# Helper to mimic the old test structure and provide better failure messages
|
||||||
|
# for spec comparisons.
|
||||||
|
defp assert_spec_normalized(expected, input_spec) do
|
||||||
|
result = TypeSpec.normalize(input_spec)
|
||||||
|
# The normalization process should produce a canonical, sorted form.
|
||||||
|
assert expected == result, """
|
||||||
|
Input Spec:
|
||||||
|
#{inspect(input_spec, pretty: true)}
|
||||||
|
|
||||||
|
Expected Normalized:
|
||||||
|
#{inspect(expected, pretty: true)}
|
||||||
|
|
||||||
|
Actual Normalized:
|
||||||
|
#{inspect(result, pretty: true)}
|
||||||
|
"""
|
||||||
|
end
|
||||||
|
|
||||||
|
# Helper to check for equivalence by comparing TDD IDs.
|
||||||
|
defmacro assert_equivalent_specs(spec1, spec2) do
|
||||||
|
quote do
|
||||||
|
assert Compiler.spec_to_id(unquote(spec1)) == Compiler.spec_to_id(unquote(spec2))
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Helper to check for subtyping using the TDD compiler.
|
||||||
|
defmacro assert_subtype(spec1, spec2) do
|
||||||
|
quote do
|
||||||
|
assert Compiler.is_subtype(unquote(spec1), unquote(spec2))
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defmacro refute_subtype(spec1, spec2) do
|
||||||
|
quote do
|
||||||
|
refute Compiler.is_subtype(unquote(spec1), unquote(spec2))
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Setup block that initializes the Tdd.Store before each test.
|
||||||
|
# This ensures that node IDs and caches are clean for every test case.
|
||||||
|
setup do
|
||||||
|
Tdd.Store.init()
|
||||||
|
:ok
|
||||||
|
end
|
||||||
|
|
||||||
|
# ---
|
||||||
|
# Tdd.Store Tests
|
||||||
|
# These tests validate the lowest-level state management of the TDD system.
|
||||||
|
# The Store is responsible for creating and storing the nodes of the decision diagram graph.
|
||||||
|
# ---
|
||||||
|
describe "Tdd.Store: Core state management for the TDD graph" do
|
||||||
|
@doc """
|
||||||
|
Tests that the store initializes with the correct, reserved IDs for the
|
||||||
|
terminal nodes representing TRUE (:any) and FALSE (:none).
|
||||||
|
"""
|
||||||
|
test "initialization and terminals" do
|
||||||
|
assert Store.true_node_id() == 1
|
||||||
|
assert Store.false_node_id() == 0
|
||||||
|
assert Store.get_node(1) == {:ok, :true_terminal}
|
||||||
|
assert Store.get_node(0) == {:ok, :false_terminal}
|
||||||
|
assert Store.get_node(99) == {:error, :not_found}
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Tests the core functionality of creating nodes. It verifies that new nodes receive
|
||||||
|
incrementing IDs and that requesting an identical node reuses the existing one
|
||||||
|
(structural sharing), which is fundamental to the efficiency of TDDs.
|
||||||
|
"""
|
||||||
|
test "node creation and structural sharing" do
|
||||||
|
var_a = {:is_atom}
|
||||||
|
var_b = {:is_integer}
|
||||||
|
true_id = Store.true_node_id()
|
||||||
|
false_id = Store.false_node_id()
|
||||||
|
|
||||||
|
# First created node gets ID 2 (after 0 and 1 are taken by terminals)
|
||||||
|
id1 = Store.find_or_create_node(var_a, true_id, false_id, false_id)
|
||||||
|
assert id1 == 2
|
||||||
|
assert Store.get_node(id1) == {:ok, {var_a, true_id, false_id, false_id}}
|
||||||
|
|
||||||
|
# Second, different node gets the next ID
|
||||||
|
id2 = Store.find_or_create_node(var_b, id1, false_id, false_id)
|
||||||
|
assert id2 == 3
|
||||||
|
|
||||||
|
# Creating the first node again returns the same ID, not a new one
|
||||||
|
id1_again = Store.find_or_create_node(var_a, true_id, false_id, false_id)
|
||||||
|
assert id1_again == id1
|
||||||
|
|
||||||
|
# Next new node gets the correct subsequent ID, proving no ID was wasted
|
||||||
|
id3 = Store.find_or_create_node(var_b, true_id, false_id, false_id)
|
||||||
|
assert id3 == 4
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Tests a key reduction rule: if a node's 'yes', 'no', and 'don't care' branches
|
||||||
|
all point to the same child node, the parent node is redundant and should be
|
||||||
|
replaced by the child node itself.
|
||||||
|
"""
|
||||||
|
test "node reduction rule for identical children" do
|
||||||
|
var_a = {:is_atom}
|
||||||
|
# from previous test logic
|
||||||
|
id3 = 4
|
||||||
|
id_redundant = Store.find_or_create_node(var_a, id3, id3, id3)
|
||||||
|
assert id_redundant == id3
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Tests the memoization cache for operations like 'apply', 'negate', etc.
|
||||||
|
This ensures that repeated operations with the same inputs do not trigger
|
||||||
|
redundant computations.
|
||||||
|
"""
|
||||||
|
test "operation caching" do
|
||||||
|
cache_key = {:my_op, 1, 2}
|
||||||
|
assert Store.get_op_cache(cache_key) == :not_found
|
||||||
|
|
||||||
|
Store.put_op_cache(cache_key, :my_result)
|
||||||
|
assert Store.get_op_cache(cache_key) == {:ok, :my_result}
|
||||||
|
|
||||||
|
Store.put_op_cache(cache_key, :new_result)
|
||||||
|
assert Store.get_op_cache(cache_key) == {:ok, :new_result}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# ---
|
||||||
|
# Tdd.TypeSpec.normalize/1 Tests
|
||||||
|
# These tests focus on ensuring the `normalize` function correctly transforms
|
||||||
|
# any TypeSpec into its canonical, simplified form.
|
||||||
|
# ---
|
||||||
|
describe "Tdd.TypeSpec.normalize/1: Base & Simple Types" do
|
||||||
|
@doc "Tests that normalizing already-simple specs doesn't change them (idempotency)."
|
||||||
|
test "normalizing :any is idempotent" do
|
||||||
|
assert_spec_normalized(:any, :any)
|
||||||
|
end
|
||||||
|
|
||||||
|
test "normalizing :none is idempotent" do
|
||||||
|
assert_spec_normalized(:none, :none)
|
||||||
|
end
|
||||||
|
|
||||||
|
test "normalizing :atom is idempotent" do
|
||||||
|
assert_spec_normalized(:atom, :atom)
|
||||||
|
end
|
||||||
|
|
||||||
|
test "normalizing a literal is idempotent" do
|
||||||
|
assert_spec_normalized({:literal, :foo}, {:literal, :foo})
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe "Tdd.TypeSpec.normalize/1: Double Negation" do
|
||||||
|
@doc "Tests the logical simplification that ¬(¬A) is equivalent to A."
|
||||||
|
test "¬(¬atom) simplifies to atom" do
|
||||||
|
assert_spec_normalized(:atom, {:negation, {:negation, :atom}})
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "Tests that a single negation is preserved when it cannot be simplified further."
|
||||||
|
test "A single negation is preserved" do
|
||||||
|
assert_spec_normalized({:negation, :integer}, {:negation, :integer})
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "Tests that an odd number of negations simplifies to a single negation."
|
||||||
|
test "¬(¬(¬atom)) simplifies to ¬atom" do
|
||||||
|
assert_spec_normalized({:negation, :atom}, {:negation, {:negation, {:negation, :atom}}})
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe "Tdd.TypeSpec.normalize/1: Union Normalization" do
|
||||||
|
@doc """
|
||||||
|
Tests that unions are canonicalized by flattening nested unions, sorting the members,
|
||||||
|
and removing duplicates. e.g., `int | (list | atom | int)` becomes `(atom | int | list)`.
|
||||||
|
"""
|
||||||
|
test "flattens, sorts, and uniques members" do
|
||||||
|
input = {:union, [:integer, {:union, [:list, :atom, :integer]}]}
|
||||||
|
expected = {:union, [:atom, :integer, :list]}
|
||||||
|
assert_spec_normalized(expected, input)
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "Tests `A | none` simplifies to `A`, as `:none` is the identity for union."
|
||||||
|
test "simplifies a union with :none (A | none -> A)" do
|
||||||
|
assert_spec_normalized(:atom, {:union, [:atom, :none]})
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "Tests `A | any` simplifies to `any`, as `:any` is the absorbing element for union."
|
||||||
|
test "simplifies a union with :any (A | any -> any)" do
|
||||||
|
assert_spec_normalized(:any, {:union, [:atom, :any]})
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "An empty set of types is logically equivalent to `:none`."
|
||||||
|
test "an empty union simplifies to :none" do
|
||||||
|
assert_spec_normalized(:none, {:union, []})
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "A union containing just one type should simplify to that type itself."
|
||||||
|
test "a union of a single element simplifies to the element itself" do
|
||||||
|
assert_spec_normalized(:atom, {:union, [:atom]})
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe "Tdd.TypeSpec.normalize/1: Intersection Normalization" do
|
||||||
|
@doc "Tests that intersections are canonicalized like unions (flatten, sort, unique)."
|
||||||
|
test "flattens, sorts, and uniques members" do
|
||||||
|
input = {:intersect, [:integer, {:intersect, [:list, :atom, :integer]}]}
|
||||||
|
expected = {:intersect, [:atom, :integer, :list]}
|
||||||
|
assert_spec_normalized(expected, input)
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "Tests `A & any` simplifies to `A`, as `:any` is the identity for intersection."
|
||||||
|
test "simplifies an intersection with :any (A & any -> A)" do
|
||||||
|
assert_spec_normalized(:atom, {:intersect, [:atom, :any]})
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "Tests `A & none` simplifies to `none`, as `:none` is the absorbing element."
|
||||||
|
test "simplifies an intersection with :none (A & none -> none)" do
|
||||||
|
assert_spec_normalized(:none, {:intersect, [:atom, :none]})
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "An intersection of zero types is logically `any` (no constraints)."
|
||||||
|
test "an empty intersection simplifies to :any" do
|
||||||
|
assert_spec_normalized(:any, {:intersect, []})
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "An intersection of one type simplifies to the type itself."
|
||||||
|
test "an intersection of a single element simplifies to the element itself" do
|
||||||
|
assert_spec_normalized(:atom, {:intersect, [:atom]})
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe "Tdd.TypeSpec.normalize/1: Subtype Reduction" do
|
||||||
|
@doc """
|
||||||
|
Tests a key simplification: if a union contains a type and its own subtype,
|
||||||
|
the subtype is redundant and should be removed. E.g., `(1 | integer)` is just `integer`.
|
||||||
|
Here, `:foo` and `:bar` are subtypes of `:atom`, so the union simplifies to `:atom`.
|
||||||
|
"""
|
||||||
|
test "(:foo | :bar | atom) simplifies to atom" do
|
||||||
|
input = {:union, [{:literal, :foo}, {:literal, :bar}, :atom]}
|
||||||
|
expected = :atom
|
||||||
|
assert_spec_normalized(expected, input)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe "Tdd.TypeSpec: Advanced Normalization (μ, Λ, Apply)" do
|
||||||
|
@doc """
|
||||||
|
Tests alpha-conversion for recursive types. The bound variable name (`:X`)
|
||||||
|
should be renamed to a canonical name (`:m_var0`) to ensure structural equality
|
||||||
|
regardless of the name chosen by the user.
|
||||||
|
"""
|
||||||
|
test "basic alpha-conversion for μ-variable" do
|
||||||
|
input = {:mu, :X, {:type_var, :X}}
|
||||||
|
expected = {:mu, :m_var0, {:type_var, :m_var0}}
|
||||||
|
assert_spec_normalized(expected, input)
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Tests that the syntactic sugar `{:list_of, T}` is correctly desugared into
|
||||||
|
its underlying recursive definition: `μT.[] | cons(T, μT)`.
|
||||||
|
"""
|
||||||
|
test "list_of(integer) normalizes to a μ-expression with canonical var" do
|
||||||
|
input = {:list_of, :integer}
|
||||||
|
|
||||||
|
expected =
|
||||||
|
{:mu, :m_var0, {:union, [{:literal, []}, {:cons, :integer, {:type_var, :m_var0}}]}}
|
||||||
|
|
||||||
|
assert_spec_normalized(expected, input)
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Tests beta-reduction (function application). Applying the identity function
|
||||||
|
`(ΛT.T)` to `integer` should result in `integer`.
|
||||||
|
"""
|
||||||
|
test "simple application: (ΛT.T) integer -> integer" do
|
||||||
|
input = {:type_apply, {:type_lambda, [:T], {:type_var, :T}}, [:integer]}
|
||||||
|
expected = :integer
|
||||||
|
assert_spec_normalized(expected, input)
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Tests a more complex beta-reduction. Applying a list constructor lambda
|
||||||
|
to `:atom` should produce the normalized form of `list_of(atom)`.
|
||||||
|
"""
|
||||||
|
test "application with structure: (ΛT. list_of(T)) atom -> list_of(atom) (normalized form)" do
|
||||||
|
input = {:type_apply, {:type_lambda, [:T], {:list_of, {:type_var, :T}}}, [:atom]}
|
||||||
|
expected = {:mu, :m_var0, {:union, [{:literal, []}, {:cons, :atom, {:type_var, :m_var0}}]}}
|
||||||
|
assert_spec_normalized(expected, input)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# ---
|
||||||
|
# Tdd.Consistency.Engine Tests
|
||||||
|
# These tests validate the logic that detects contradictions in a set of predicate assumptions.
|
||||||
|
# ---
|
||||||
|
describe "Tdd.Consistency.Engine: Logic for detecting contradictions" do
|
||||||
|
# This setup is local to this describe block, which is fine.
|
||||||
|
setup do
|
||||||
|
Tdd.Store.init()
|
||||||
|
id_atom = Tdd.Compiler.spec_to_id(:atom)
|
||||||
|
%{id_atom: id_atom}
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "An empty set of assumptions has no contradictions."
|
||||||
|
test "an empty assumption map is consistent" do
|
||||||
|
assert Engine.check(%{}) == :consistent
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Tests that the engine uses predicate traits to find implied contradictions.
|
||||||
|
`v_atom_eq(:foo)` implies `v_is_atom()` is true, which contradicts the explicit
|
||||||
|
assumption that `v_is_atom()` is false.
|
||||||
|
"""
|
||||||
|
test "an implied contradiction is caught by expander" do
|
||||||
|
assumptions = %{Variable.v_atom_eq(:foo) => true, Variable.v_is_atom() => false}
|
||||||
|
assert Engine.check(assumptions) == :contradiction
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "A term cannot belong to two different primary types like :atom and :integer."
|
||||||
|
test "two primary types cannot both be true" do
|
||||||
|
assumptions = %{Variable.v_is_atom() => true, Variable.v_is_integer() => true}
|
||||||
|
assert Engine.check(assumptions) == :contradiction
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "A list cannot be empty and simultaneously have properties on its head (which wouldn't exist)."
|
||||||
|
test "a list cannot be empty and have a head property", %{id_atom: id_atom} do
|
||||||
|
assumptions = %{
|
||||||
|
Variable.v_list_is_empty() => true,
|
||||||
|
Variable.v_list_head_pred(id_atom) => true
|
||||||
|
}
|
||||||
|
|
||||||
|
assert Engine.check(assumptions) == :contradiction
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "Tests for logical contradictions in integer ranges."
|
||||||
|
test "int < 10 AND int > 20 is a contradiction" do
|
||||||
|
assumptions = %{
|
||||||
|
Variable.v_int_lt(10) => true,
|
||||||
|
Variable.v_int_gt(20) => true
|
||||||
|
}
|
||||||
|
|
||||||
|
assert Engine.check(assumptions) == :contradiction
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# ---
|
||||||
|
# Compiler & Algo Integration Tests
|
||||||
|
# These tests ensure that the high-level public APIs (`is_subtype`, `spec_to_id`)
|
||||||
|
# work correctly by integrating the compiler and the graph algorithms.
|
||||||
|
# ---
|
||||||
|
describe "Tdd.Compiler and Tdd.Algo Integration: High-level API validation" do
|
||||||
|
@doc "Verifies semantic equivalence of types using TDD IDs. e.g., `atom & any` is the same type as `atom`."
|
||||||
|
test "basic equivalences" do
|
||||||
|
assert_equivalent_specs({:intersect, [:atom, :any]}, :atom)
|
||||||
|
assert_equivalent_specs({:union, [:atom, :none]}, :atom)
|
||||||
|
assert_equivalent_specs({:intersect, [:atom, :integer]}, :none)
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "Tests the main `is_subtype` public API for simple, non-recursive types."
|
||||||
|
test "basic subtyping" do
|
||||||
|
assert_subtype({:literal, :foo}, :atom)
|
||||||
|
refute_subtype(:atom, {:literal, :foo})
|
||||||
|
assert_subtype(:none, :atom)
|
||||||
|
assert_subtype(:atom, :any)
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "Tests that impossible type intersections compile to the `:none` (FALSE) node."
|
||||||
|
test "contradictions" do
|
||||||
|
assert Compiler.spec_to_id({:intersect, [:atom, :integer]}) == Store.false_node_id()
|
||||||
|
|
||||||
|
assert Compiler.spec_to_id({:intersect, [{:literal, :foo}, {:literal, :bar}]}) ==
|
||||||
|
Store.false_node_id()
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# ---
|
||||||
|
# Tdd.Compiler Advanced Feature Tests
|
||||||
|
# These tests target the most complex features: recursive and polymorphic types.
|
||||||
|
# ---
|
||||||
|
describe "Tdd.Compiler: Advanced Features (μ, Λ, Apply)" do
|
||||||
|
@doc """
|
||||||
|
It checks for covariance in generic types: a list of integers is a subtype of a list of anything,
|
||||||
|
but the reverse is not true. This requires the system to correctly handle coinductive reasoning
|
||||||
|
on the recursive TDD nodes.
|
||||||
|
"""
|
||||||
|
test "the previously crashing recursive subtype test now passes" do
|
||||||
|
int_list = {:list_of, :integer}
|
||||||
|
any_list = {:list_of, :any}
|
||||||
|
assert_subtype(:integer, :any)
|
||||||
|
# The key test that was failing due to the bug
|
||||||
|
assert_subtype(int_list, any_list)
|
||||||
|
refute_subtype(any_list, int_list)
|
||||||
|
|
||||||
|
# Also test instances against the recursive type
|
||||||
|
assert_subtype({:cons, {:literal, 1}, {:literal, []}}, int_list)
|
||||||
|
refute_subtype({:cons, {:literal, :a}, {:literal, []}}, int_list)
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc "Tests that manually-defined recursive types (like a binary tree) can be compiled and checked correctly."
|
||||||
|
test "explicit μ-types" do
|
||||||
|
leaf_node = {:literal, :empty_tree}
|
||||||
|
|
||||||
|
tree_spec =
|
||||||
|
{:mu, :Tree,
|
||||||
|
{:union,
|
||||||
|
[
|
||||||
|
leaf_node,
|
||||||
|
{:tuple, [:atom, {:type_var, :Tree}, {:type_var, :Tree}]}
|
||||||
|
]}}
|
||||||
|
|
||||||
|
# Test that it compiles to a valid TDD ID
|
||||||
|
assert is_integer(Compiler.spec_to_id(tree_spec))
|
||||||
|
|
||||||
|
# Test that an instance of the tree is correctly identified as a subtype
|
||||||
|
simple_tree_instance = {:tuple, [{:literal, :a}, leaf_node, leaf_node]}
|
||||||
|
assert_subtype(simple_tree_instance, tree_spec)
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Tests that a polymorphic type created via lambda application is equivalent
|
||||||
|
to its manually specialized counterpart. e.g., `(List<T>)(int)` should be the
|
||||||
|
same as `List<int>`.
|
||||||
|
"""
|
||||||
|
test "polymorphism (Λ, Apply)" do
|
||||||
|
gen_list_lambda = {:type_lambda, [:Tparam], {:list_of, {:type_var, :Tparam}}}
|
||||||
|
list_of_int_from_apply = {:type_apply, gen_list_lambda, [:integer]}
|
||||||
|
int_list = {:list_of, :integer}
|
||||||
|
|
||||||
|
assert_equivalent_specs(list_of_int_from_apply, int_list)
|
||||||
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
@ -1,77 +1,77 @@
|
|||||||
defmodule Tilly.BDD.AtomBoolOpsTest do
|
# defmodule Tilly.BDD.AtomBoolOpsTest do
|
||||||
use ExUnit.Case, async: true
|
# use ExUnit.Case, async: true
|
||||||
|
#
|
||||||
alias Tilly.BDD.AtomBoolOps
|
# alias Tilly.BDD.AtomBoolOps
|
||||||
|
#
|
||||||
describe "compare_elements/2" do
|
# describe "compare_elements/2" do
|
||||||
test "correctly compares atoms" do
|
# test "correctly compares atoms" do
|
||||||
assert AtomBoolOps.compare_elements(:apple, :banana) == :lt
|
# assert AtomBoolOps.compare_elements(:apple, :banana) == :lt
|
||||||
assert AtomBoolOps.compare_elements(:banana, :apple) == :gt
|
# assert AtomBoolOps.compare_elements(:banana, :apple) == :gt
|
||||||
assert AtomBoolOps.compare_elements(:cherry, :cherry) == :eq
|
# assert AtomBoolOps.compare_elements(:cherry, :cherry) == :eq
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "equal_element?/2" do
|
# describe "equal_element?/2" do
|
||||||
test "correctly checks atom equality" do
|
# test "correctly checks atom equality" do
|
||||||
assert AtomBoolOps.equal_element?(:apple, :apple) == true
|
# assert AtomBoolOps.equal_element?(:apple, :apple) == true
|
||||||
assert AtomBoolOps.equal_element?(:apple, :banana) == false
|
# assert AtomBoolOps.equal_element?(:apple, :banana) == false
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "hash_element/1" do
|
# describe "hash_element/1" do
|
||||||
test "hashes atoms consistently" do
|
# test "hashes atoms consistently" do
|
||||||
assert is_integer(AtomBoolOps.hash_element(:foo))
|
# assert is_integer(AtomBoolOps.hash_element(:foo))
|
||||||
assert AtomBoolOps.hash_element(:foo) == AtomBoolOps.hash_element(:foo)
|
# assert AtomBoolOps.hash_element(:foo) == AtomBoolOps.hash_element(:foo)
|
||||||
assert AtomBoolOps.hash_element(:foo) != AtomBoolOps.hash_element(:bar)
|
# assert AtomBoolOps.hash_element(:foo) != AtomBoolOps.hash_element(:bar)
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "leaf operations" do
|
# describe "leaf operations" do
|
||||||
test "empty_leaf/0 returns false" do
|
# test "empty_leaf/0 returns false" do
|
||||||
assert AtomBoolOps.empty_leaf() == false
|
# assert AtomBoolOps.empty_leaf() == false
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "any_leaf/0 returns true" do
|
# test "any_leaf/0 returns true" do
|
||||||
assert AtomBoolOps.any_leaf() == true
|
# assert AtomBoolOps.any_leaf() == true
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "is_empty_leaf?/1" do
|
# test "is_empty_leaf?/1" do
|
||||||
assert AtomBoolOps.is_empty_leaf?(false) == true
|
# assert AtomBoolOps.is_empty_leaf?(false) == true
|
||||||
assert AtomBoolOps.is_empty_leaf?(true) == false
|
# assert AtomBoolOps.is_empty_leaf?(true) == false
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "union_leaves/3" do
|
# test "union_leaves/3" do
|
||||||
assert AtomBoolOps.union_leaves(%{}, false, false) == false
|
# assert AtomBoolOps.union_leaves(%{}, false, false) == false
|
||||||
assert AtomBoolOps.union_leaves(%{}, true, false) == true
|
# assert AtomBoolOps.union_leaves(%{}, true, false) == true
|
||||||
assert AtomBoolOps.union_leaves(%{}, false, true) == true
|
# assert AtomBoolOps.union_leaves(%{}, false, true) == true
|
||||||
assert AtomBoolOps.union_leaves(%{}, true, true) == true
|
# assert AtomBoolOps.union_leaves(%{}, true, true) == true
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "intersection_leaves/3" do
|
# test "intersection_leaves/3" do
|
||||||
assert AtomBoolOps.intersection_leaves(%{}, false, false) == false
|
# assert AtomBoolOps.intersection_leaves(%{}, false, false) == false
|
||||||
assert AtomBoolOps.intersection_leaves(%{}, true, false) == false
|
# assert AtomBoolOps.intersection_leaves(%{}, true, false) == false
|
||||||
assert AtomBoolOps.intersection_leaves(%{}, false, true) == false
|
# assert AtomBoolOps.intersection_leaves(%{}, false, true) == false
|
||||||
assert AtomBoolOps.intersection_leaves(%{}, true, true) == true
|
# assert AtomBoolOps.intersection_leaves(%{}, true, true) == true
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "negation_leaf/2" do
|
# test "negation_leaf/2" do
|
||||||
assert AtomBoolOps.negation_leaf(%{}, false) == true
|
# assert AtomBoolOps.negation_leaf(%{}, false) == true
|
||||||
assert AtomBoolOps.negation_leaf(%{}, true) == false
|
# assert AtomBoolOps.negation_leaf(%{}, true) == false
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "test_leaf_value/1" do
|
# describe "test_leaf_value/1" do
|
||||||
test "returns :empty for false" do
|
# test "returns :empty for false" do
|
||||||
assert AtomBoolOps.test_leaf_value(false) == :empty
|
# assert AtomBoolOps.test_leaf_value(false) == :empty
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "returns :full for true" do
|
# test "returns :full for true" do
|
||||||
assert AtomBoolOps.test_leaf_value(true) == :full
|
# assert AtomBoolOps.test_leaf_value(true) == :full
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
# Conceptual test if atoms had other leaf values
|
# # Conceptual test if atoms had other leaf values
|
||||||
# test "returns :other for other values" do
|
# # test "returns :other for other values" do
|
||||||
# assert AtomBoolOps.test_leaf_value(:some_other_leaf_marker) == :other
|
# # assert AtomBoolOps.test_leaf_value(:some_other_leaf_marker) == :other
|
||||||
|
# # end
|
||||||
|
# end
|
||||||
# end
|
# end
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|||||||
@ -1,67 +1,67 @@
|
|||||||
defmodule Tilly.BDD.IntegerBoolOpsTest do
|
# defmodule Tilly.BDD.IntegerBoolOpsTest do
|
||||||
use ExUnit.Case, async: true
|
# use ExUnit.Case, async: true
|
||||||
|
#
|
||||||
alias Tilly.BDD.IntegerBoolOps
|
# alias Tilly.BDD.IntegerBoolOps
|
||||||
|
#
|
||||||
describe "compare_elements/2" do
|
# describe "compare_elements/2" do
|
||||||
test "correctly compares integers" do
|
# test "correctly compares integers" do
|
||||||
assert IntegerBoolOps.compare_elements(1, 2) == :lt
|
# assert IntegerBoolOps.compare_elements(1, 2) == :lt
|
||||||
assert IntegerBoolOps.compare_elements(2, 1) == :gt
|
# assert IntegerBoolOps.compare_elements(2, 1) == :gt
|
||||||
assert IntegerBoolOps.compare_elements(1, 1) == :eq
|
# assert IntegerBoolOps.compare_elements(1, 1) == :eq
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "equal_element?/2" do
|
# describe "equal_element?/2" do
|
||||||
test "correctly checks equality of integers" do
|
# test "correctly checks equality of integers" do
|
||||||
assert IntegerBoolOps.equal_element?(1, 1) == true
|
# assert IntegerBoolOps.equal_element?(1, 1) == true
|
||||||
assert IntegerBoolOps.equal_element?(1, 2) == false
|
# assert IntegerBoolOps.equal_element?(1, 2) == false
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "hash_element/1" do
|
# describe "hash_element/1" do
|
||||||
test "returns the integer itself as hash" do
|
# test "returns the integer itself as hash" do
|
||||||
assert IntegerBoolOps.hash_element(123) == 123
|
# assert IntegerBoolOps.hash_element(123) == 123
|
||||||
assert IntegerBoolOps.hash_element(-5) == -5
|
# assert IntegerBoolOps.hash_element(-5) == -5
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "leaf operations" do
|
# describe "leaf operations" do
|
||||||
test "empty_leaf/0 returns false" do
|
# test "empty_leaf/0 returns false" do
|
||||||
assert IntegerBoolOps.empty_leaf() == false
|
# assert IntegerBoolOps.empty_leaf() == false
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "any_leaf/0 returns true" do
|
# test "any_leaf/0 returns true" do
|
||||||
assert IntegerBoolOps.any_leaf() == true
|
# assert IntegerBoolOps.any_leaf() == true
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "is_empty_leaf?/1" do
|
# test "is_empty_leaf?/1" do
|
||||||
assert IntegerBoolOps.is_empty_leaf?(false) == true
|
# assert IntegerBoolOps.is_empty_leaf?(false) == true
|
||||||
assert IntegerBoolOps.is_empty_leaf?(true) == false
|
# assert IntegerBoolOps.is_empty_leaf?(true) == false
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "union_leaves/3" do
|
# describe "union_leaves/3" do
|
||||||
test "computes boolean OR" do
|
# test "computes boolean OR" do
|
||||||
assert IntegerBoolOps.union_leaves(%{}, true, true) == true
|
# assert IntegerBoolOps.union_leaves(%{}, true, true) == true
|
||||||
assert IntegerBoolOps.union_leaves(%{}, true, false) == true
|
# assert IntegerBoolOps.union_leaves(%{}, true, false) == true
|
||||||
assert IntegerBoolOps.union_leaves(%{}, false, true) == true
|
# assert IntegerBoolOps.union_leaves(%{}, false, true) == true
|
||||||
assert IntegerBoolOps.union_leaves(%{}, false, false) == false
|
# assert IntegerBoolOps.union_leaves(%{}, false, false) == false
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "intersection_leaves/3" do
|
# describe "intersection_leaves/3" do
|
||||||
test "computes boolean AND" do
|
# test "computes boolean AND" do
|
||||||
assert IntegerBoolOps.intersection_leaves(%{}, true, true) == true
|
# assert IntegerBoolOps.intersection_leaves(%{}, true, true) == true
|
||||||
assert IntegerBoolOps.intersection_leaves(%{}, true, false) == false
|
# assert IntegerBoolOps.intersection_leaves(%{}, true, false) == false
|
||||||
assert IntegerBoolOps.intersection_leaves(%{}, false, true) == false
|
# assert IntegerBoolOps.intersection_leaves(%{}, false, true) == false
|
||||||
assert IntegerBoolOps.intersection_leaves(%{}, false, false) == false
|
# assert IntegerBoolOps.intersection_leaves(%{}, false, false) == false
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "negation_leaf/2" do
|
# describe "negation_leaf/2" do
|
||||||
test "computes boolean NOT" do
|
# test "computes boolean NOT" do
|
||||||
assert IntegerBoolOps.negation_leaf(%{}, true) == false
|
# assert IntegerBoolOps.negation_leaf(%{}, true) == false
|
||||||
assert IntegerBoolOps.negation_leaf(%{}, false) == true
|
# assert IntegerBoolOps.negation_leaf(%{}, false) == true
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
|||||||
@ -1,123 +1,123 @@
|
|||||||
defmodule Tilly.BDD.NodeTest do
|
# defmodule Tilly.BDD.NodeTest do
|
||||||
use ExUnit.Case, async: true
|
# use ExUnit.Case, async: true
|
||||||
|
#
|
||||||
alias Tilly.BDD.Node
|
# alias Tilly.BDD.Node
|
||||||
|
#
|
||||||
describe "Smart Constructors" do
|
# describe "Smart Constructors" do
|
||||||
test "mk_true/0 returns true" do
|
# test "mk_true/0 returns true" do
|
||||||
assert Node.mk_true() == true
|
# assert Node.mk_true() == true
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "mk_false/0 returns false" do
|
# test "mk_false/0 returns false" do
|
||||||
assert Node.mk_false() == false
|
# assert Node.mk_false() == false
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "mk_leaf/1 creates a leaf node" do
|
# test "mk_leaf/1 creates a leaf node" do
|
||||||
assert Node.mk_leaf(:some_value) == {:leaf, :some_value}
|
# assert Node.mk_leaf(:some_value) == {:leaf, :some_value}
|
||||||
assert Node.mk_leaf(123) == {:leaf, 123}
|
# assert Node.mk_leaf(123) == {:leaf, 123}
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "mk_split/4 creates a split node" do
|
# test "mk_split/4 creates a split node" do
|
||||||
assert Node.mk_split(:el, :p_id, :i_id, :n_id) == {:split, :el, :p_id, :i_id, :n_id}
|
# assert Node.mk_split(:el, :p_id, :i_id, :n_id) == {:split, :el, :p_id, :i_id, :n_id}
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "Predicates" do
|
# describe "Predicates" do
|
||||||
setup do
|
# setup do
|
||||||
%{
|
# %{
|
||||||
true_node: Node.mk_true(),
|
# true_node: Node.mk_true(),
|
||||||
false_node: Node.mk_false(),
|
# false_node: Node.mk_false(),
|
||||||
leaf_node: Node.mk_leaf("data"),
|
# leaf_node: Node.mk_leaf("data"),
|
||||||
split_node: Node.mk_split(1, 2, 3, 4)
|
# split_node: Node.mk_split(1, 2, 3, 4)
|
||||||
}
|
# }
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "is_true?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do
|
# test "is_true?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do
|
||||||
assert Node.is_true?(t) == true
|
# assert Node.is_true?(t) == true
|
||||||
assert Node.is_true?(f) == false
|
# assert Node.is_true?(f) == false
|
||||||
assert Node.is_true?(l) == false
|
# assert Node.is_true?(l) == false
|
||||||
assert Node.is_true?(s) == false
|
# assert Node.is_true?(s) == false
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "is_false?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do
|
# test "is_false?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do
|
||||||
assert Node.is_false?(f) == true
|
# assert Node.is_false?(f) == true
|
||||||
assert Node.is_false?(t) == false
|
# assert Node.is_false?(t) == false
|
||||||
assert Node.is_false?(l) == false
|
# assert Node.is_false?(l) == false
|
||||||
assert Node.is_false?(s) == false
|
# assert Node.is_false?(s) == false
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "is_leaf?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do
|
# test "is_leaf?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do
|
||||||
assert Node.is_leaf?(l) == true
|
# assert Node.is_leaf?(l) == true
|
||||||
assert Node.is_leaf?(t) == false
|
# assert Node.is_leaf?(t) == false
|
||||||
assert Node.is_leaf?(f) == false
|
# assert Node.is_leaf?(f) == false
|
||||||
assert Node.is_leaf?(s) == false
|
# assert Node.is_leaf?(s) == false
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "is_split?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do
|
# test "is_split?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do
|
||||||
assert Node.is_split?(s) == true
|
# assert Node.is_split?(s) == true
|
||||||
assert Node.is_split?(t) == false
|
# assert Node.is_split?(t) == false
|
||||||
assert Node.is_split?(f) == false
|
# assert Node.is_split?(f) == false
|
||||||
assert Node.is_split?(l) == false
|
# assert Node.is_split?(l) == false
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "Accessors" do
|
# describe "Accessors" do
|
||||||
setup do
|
# setup do
|
||||||
%{
|
# %{
|
||||||
leaf_node: Node.mk_leaf("leaf_data"),
|
# leaf_node: Node.mk_leaf("leaf_data"),
|
||||||
split_node: Node.mk_split(:elem_id, :pos_child, :ign_child, :neg_child)
|
# split_node: Node.mk_split(:elem_id, :pos_child, :ign_child, :neg_child)
|
||||||
}
|
# }
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "value/1 for leaf node", %{leaf_node: l} do
|
# test "value/1 for leaf node", %{leaf_node: l} do
|
||||||
assert Node.value(l) == "leaf_data"
|
# assert Node.value(l) == "leaf_data"
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "value/1 raises for non-leaf node" do
|
# test "value/1 raises for non-leaf node" do
|
||||||
assert_raise ArgumentError, ~r/Not a leaf node/, fn -> Node.value(Node.mk_true()) end
|
# assert_raise ArgumentError, ~r/Not a leaf node/, fn -> Node.value(Node.mk_true()) end
|
||||||
|
#
|
||||||
assert_raise ArgumentError, ~r/Not a leaf node/, fn ->
|
# assert_raise ArgumentError, ~r/Not a leaf node/, fn ->
|
||||||
Node.value(Node.mk_split(1, 2, 3, 4))
|
# Node.value(Node.mk_split(1, 2, 3, 4))
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "element/1 for split node", %{split_node: s} do
|
# test "element/1 for split node", %{split_node: s} do
|
||||||
assert Node.element(s) == :elem_id
|
# assert Node.element(s) == :elem_id
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "element/1 raises for non-split node" do
|
# test "element/1 raises for non-split node" do
|
||||||
assert_raise ArgumentError, ~r/Not a split node/, fn -> Node.element(Node.mk_true()) end
|
# assert_raise ArgumentError, ~r/Not a split node/, fn -> Node.element(Node.mk_true()) end
|
||||||
assert_raise ArgumentError, ~r/Not a split node/, fn -> Node.element(Node.mk_leaf(1)) end
|
# assert_raise ArgumentError, ~r/Not a split node/, fn -> Node.element(Node.mk_leaf(1)) end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "positive_child/1 for split node", %{split_node: s} do
|
# test "positive_child/1 for split node", %{split_node: s} do
|
||||||
assert Node.positive_child(s) == :pos_child
|
# assert Node.positive_child(s) == :pos_child
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "positive_child/1 raises for non-split node" do
|
# test "positive_child/1 raises for non-split node" do
|
||||||
assert_raise ArgumentError, ~r/Not a split node/, fn ->
|
# assert_raise ArgumentError, ~r/Not a split node/, fn ->
|
||||||
Node.positive_child(Node.mk_leaf(1))
|
# Node.positive_child(Node.mk_leaf(1))
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "ignore_child/1 for split node", %{split_node: s} do
|
# test "ignore_child/1 for split node", %{split_node: s} do
|
||||||
assert Node.ignore_child(s) == :ign_child
|
# assert Node.ignore_child(s) == :ign_child
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "ignore_child/1 raises for non-split node" do
|
# test "ignore_child/1 raises for non-split node" do
|
||||||
assert_raise ArgumentError, ~r/Not a split node/, fn ->
|
# assert_raise ArgumentError, ~r/Not a split node/, fn ->
|
||||||
Node.ignore_child(Node.mk_leaf(1))
|
# Node.ignore_child(Node.mk_leaf(1))
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "negative_child/1 for split node", %{split_node: s} do
|
# test "negative_child/1 for split node", %{split_node: s} do
|
||||||
assert Node.negative_child(s) == :neg_child
|
# assert Node.negative_child(s) == :neg_child
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "negative_child/1 raises for non-split node" do
|
# test "negative_child/1 raises for non-split node" do
|
||||||
assert_raise ArgumentError, ~r/Not a split node/, fn ->
|
# assert_raise ArgumentError, ~r/Not a split node/, fn ->
|
||||||
Node.negative_child(Node.mk_leaf(1))
|
# Node.negative_child(Node.mk_leaf(1))
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
|||||||
@ -1,191 +1,191 @@
|
|||||||
defmodule Tilly.BDD.OpsTest do
|
# defmodule Tilly.BDD.OpsTest do
|
||||||
use ExUnit.Case, async: true
|
# use ExUnit.Case, async: true
|
||||||
|
#
|
||||||
alias Tilly.BDD
|
# alias Tilly.BDD
|
||||||
alias Tilly.BDD.Node
|
# alias Tilly.BDD.Node
|
||||||
alias Tilly.BDD.Ops
|
# alias Tilly.BDD.Ops
|
||||||
alias Tilly.BDD.IntegerBoolOps # Using a concrete ops_module for testing
|
# alias Tilly.BDD.IntegerBoolOps # Using a concrete ops_module for testing
|
||||||
|
#
|
||||||
setup do
|
# setup do
|
||||||
typing_ctx = BDD.init_bdd_store(%{})
|
# typing_ctx = BDD.init_bdd_store(%{})
|
||||||
# Pre-intern some common elements for tests if needed, e.g., integers
|
# # Pre-intern some common elements for tests if needed, e.g., integers
|
||||||
# For now, rely on ops to intern elements as they are used.
|
# # For now, rely on ops to intern elements as they are used.
|
||||||
%{initial_ctx: typing_ctx}
|
# %{initial_ctx: typing_ctx}
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "leaf/3" do
|
# describe "leaf/3" do
|
||||||
test "interning an empty leaf value returns predefined false_id", %{initial_ctx: ctx} do
|
# test "interning an empty leaf value returns predefined false_id", %{initial_ctx: ctx} do
|
||||||
{new_ctx, node_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
# {new_ctx, node_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
||||||
assert node_id == BDD.false_node_id()
|
# assert node_id == BDD.false_node_id()
|
||||||
assert new_ctx.bdd_store.ops_cache == ctx.bdd_store.ops_cache # Cache not used for this path
|
# assert new_ctx.bdd_store.ops_cache == ctx.bdd_store.ops_cache # Cache not used for this path
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "interning a full leaf value returns predefined true_id", %{initial_ctx: ctx} do
|
# test "interning a full leaf value returns predefined true_id", %{initial_ctx: ctx} do
|
||||||
{new_ctx, node_id} = Ops.leaf(ctx, true, IntegerBoolOps)
|
# {new_ctx, node_id} = Ops.leaf(ctx, true, IntegerBoolOps)
|
||||||
assert node_id == BDD.true_node_id()
|
# assert node_id == BDD.true_node_id()
|
||||||
assert new_ctx.bdd_store.ops_cache == ctx.bdd_store.ops_cache
|
# assert new_ctx.bdd_store.ops_cache == ctx.bdd_store.ops_cache
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
@tag :skip
|
# @tag :skip
|
||||||
test "interning a new 'other' leaf value returns a new ID", %{initial_ctx: _ctx} do
|
# test "interning a new 'other' leaf value returns a new ID", %{initial_ctx: _ctx} do
|
||||||
# Assuming IntegerBoolOps.test_leaf_value/1 would return :other for non-booleans
|
# # Assuming IntegerBoolOps.test_leaf_value/1 would return :other for non-booleans
|
||||||
# For this test, we'd need an ops_module where e.g. an integer is an :other leaf.
|
# # For this test, we'd need an ops_module where e.g. an integer is an :other leaf.
|
||||||
# Let's simulate with a mock or by extending IntegerBoolOps if it were not read-only.
|
# # Let's simulate with a mock or by extending IntegerBoolOps if it were not read-only.
|
||||||
# For now, this test is conceptual for boolean leaves.
|
# # For now, this test is conceptual for boolean leaves.
|
||||||
# If IntegerBoolOps was extended:
|
# # If IntegerBoolOps was extended:
|
||||||
# defmodule MockIntegerOps do
|
# # defmodule MockIntegerOps do
|
||||||
# defdelegate compare_elements(e1, e2), to: IntegerBoolOps
|
# # defdelegate compare_elements(e1, e2), to: IntegerBoolOps
|
||||||
# defdelegate equal_element?(e1, e2), to: IntegerBoolOps
|
# # defdelegate equal_element?(e1, e2), to: IntegerBoolOps
|
||||||
# # ... other delegates
|
# # # ... other delegates
|
||||||
# def test_leaf_value(10), do: :other # Treat 10 as a specific leaf
|
# # def test_leaf_value(10), do: :other # Treat 10 as a specific leaf
|
||||||
# def test_leaf_value(true), do: :full
|
# # def test_leaf_value(true), do: :full
|
||||||
# def test_leaf_value(false), do: :empty
|
# # def test_leaf_value(false), do: :empty
|
||||||
|
# # end
|
||||||
|
# # {ctx_after_intern, node_id} = Ops.leaf(ctx, 10, MockIntegerOps)
|
||||||
|
# # assert node_id != BDD.true_node_id() and node_id != BDD.false_node_id()
|
||||||
|
# # assert BDD.get_node_data(ctx_after_intern, node_id).structure == Node.mk_leaf(10)
|
||||||
|
# # Placeholder for more complex leaf types. Test is skipped.
|
||||||
|
# end
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# describe "split/6 basic simplifications" do
|
||||||
|
# test "if i_id is true, returns true_id", %{initial_ctx: ctx} do
|
||||||
|
# {_p_ctx, p_id} = Ops.leaf(ctx, false, IntegerBoolOps) # dummy
|
||||||
|
# {_n_ctx, n_id} = Ops.leaf(ctx, false, IntegerBoolOps) # dummy
|
||||||
|
# true_id = BDD.true_node_id()
|
||||||
|
#
|
||||||
|
# {new_ctx, result_id} = Ops.split(ctx, 10, p_id, true_id, n_id, IntegerBoolOps)
|
||||||
|
# assert result_id == true_id
|
||||||
|
# assert new_ctx == ctx # No new nodes or cache entries expected for this rule
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# test "if p_id == n_id and p_id == i_id, returns p_id", %{initial_ctx: ctx} do
|
||||||
|
# {ctx, p_id} = BDD.get_or_intern_node(ctx, Node.mk_leaf(false), IntegerBoolOps) # some leaf
|
||||||
|
# i_id = p_id
|
||||||
|
# n_id = p_id
|
||||||
|
#
|
||||||
|
# {_new_ctx, result_id} = Ops.split(ctx, 10, p_id, i_id, n_id, IntegerBoolOps)
|
||||||
|
# assert result_id == p_id
|
||||||
|
# # Cache might be touched if union_bdds was called, but this rule is direct.
|
||||||
|
# # For p_id == i_id, it's direct.
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# test "if p_id == n_id and p_id != i_id, returns union(p_id, i_id)", %{initial_ctx: ctx} do
|
||||||
|
# {ctx, p_id} = BDD.get_or_intern_node(ctx, Node.mk_leaf(false), IntegerBoolOps)
|
||||||
|
# {ctx, i_id} = BDD.get_or_intern_node(ctx, Node.mk_leaf(true), IntegerBoolOps) # different leaf
|
||||||
|
# n_id = p_id
|
||||||
|
#
|
||||||
|
# # Expected union of p_id (false_leaf) and i_id (true_leaf) is true_id
|
||||||
|
# # This relies on union_bdds working.
|
||||||
|
# {_new_ctx, result_id} = Ops.split(ctx, 10, p_id, i_id, n_id, IntegerBoolOps)
|
||||||
|
# expected_union_id = BDD.true_node_id() # Union of false_leaf and true_leaf
|
||||||
|
# assert result_id == expected_union_id
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# test "interns a new split node if no simplification rule applies", %{initial_ctx: ctx} do
|
||||||
|
# {ctx, p_id} = Ops.leaf(ctx, false, IntegerBoolOps) # false_node_id
|
||||||
|
# {ctx, i_id} = Ops.leaf(ctx, false, IntegerBoolOps) # false_node_id
|
||||||
|
# {ctx, n_id} = Ops.leaf(ctx, true, IntegerBoolOps) # true_node_id (different from p_id)
|
||||||
|
#
|
||||||
|
# element = 20
|
||||||
|
# {new_ctx, split_node_id} = Ops.split(ctx, element, p_id, i_id, n_id, IntegerBoolOps)
|
||||||
|
#
|
||||||
|
# assert split_node_id != p_id and split_node_id != i_id and split_node_id != n_id
|
||||||
|
# assert split_node_id != BDD.true_node_id() and split_node_id != BDD.false_node_id()
|
||||||
|
#
|
||||||
|
# node_data = BDD.get_node_data(new_ctx, split_node_id)
|
||||||
|
# assert node_data.structure == Node.mk_split(element, p_id, i_id, n_id)
|
||||||
|
# assert node_data.ops_module == IntegerBoolOps
|
||||||
|
# assert new_ctx.bdd_store.next_node_id > ctx.bdd_store.next_node_id
|
||||||
|
# end
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# describe "union_bdds/3" do
|
||||||
|
# test "A U A = A", %{initial_ctx: ctx} do
|
||||||
|
# {ctx, a_id} = Ops.leaf(ctx, false, IntegerBoolOps) # false_node_id
|
||||||
|
# {new_ctx, result_id} = Ops.union_bdds(ctx, a_id, a_id)
|
||||||
|
# assert result_id == a_id
|
||||||
|
# assert Map.has_key?(new_ctx.bdd_store.ops_cache, {:union, a_id, a_id})
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# test "A U True = True", %{initial_ctx: ctx} do
|
||||||
|
# {ctx, a_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
||||||
|
# true_id = BDD.true_node_id()
|
||||||
|
# {_new_ctx, result_id} = Ops.union_bdds(ctx, a_id, true_id)
|
||||||
|
# assert result_id == true_id
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# test "A U False = A", %{initial_ctx: ctx} do
|
||||||
|
# {ctx, a_id} = Ops.leaf(ctx, true, IntegerBoolOps) # true_node_id
|
||||||
|
# false_id = BDD.false_node_id()
|
||||||
|
# {_new_ctx, result_id} = Ops.union_bdds(ctx, a_id, false_id)
|
||||||
|
# assert result_id == a_id
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# test "union of two distinct leaves", %{initial_ctx: ctx} do
|
||||||
|
# # leaf(false) U leaf(true) = leaf(true OR false) = leaf(true) -> true_node_id
|
||||||
|
# {ctx, leaf_false_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
||||||
|
# {ctx, leaf_true_id} = Ops.leaf(ctx, true, IntegerBoolOps) # This is BDD.true_node_id()
|
||||||
|
#
|
||||||
|
# {_new_ctx, result_id} = Ops.union_bdds(ctx, leaf_false_id, leaf_true_id)
|
||||||
|
# assert result_id == BDD.true_node_id()
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# test "union of two simple split nodes with same element", %{initial_ctx: ctx} do
|
||||||
|
# # BDD1: split(10, True, False, False)
|
||||||
|
# # BDD2: split(10, False, True, False)
|
||||||
|
# # Union: split(10, True U False, False U True, False U False)
|
||||||
|
# # = split(10, True, True, False)
|
||||||
|
#
|
||||||
|
# true_id = BDD.true_node_id()
|
||||||
|
# false_id = BDD.false_node_id()
|
||||||
|
#
|
||||||
|
# {ctx, bdd1_id} = Ops.split(ctx, 10, true_id, false_id, false_id, IntegerBoolOps)
|
||||||
|
# {ctx, bdd2_id} = Ops.split(ctx, 10, false_id, true_id, false_id, IntegerBoolOps)
|
||||||
|
#
|
||||||
|
# {final_ctx, union_id} = Ops.union_bdds(ctx, bdd1_id, bdd2_id)
|
||||||
|
#
|
||||||
|
# # Expected structure
|
||||||
|
# {_final_ctx, expected_bdd_id} = Ops.split(final_ctx, 10, true_id, true_id, false_id, IntegerBoolOps)
|
||||||
|
# assert union_id == expected_bdd_id
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# test "union of two simple split nodes with different elements (x1 < x2)", %{initial_ctx: ctx} do
|
||||||
|
# # BDD1: split(10, True, False, False)
|
||||||
|
# # BDD2: split(20, False, True, False)
|
||||||
|
# # Union (x1 < x2): split(10, p1, i1 U BDD2, n1)
|
||||||
|
# # = split(10, True, False U BDD2, False)
|
||||||
|
# # = split(10, True, BDD2, False)
|
||||||
|
#
|
||||||
|
# {ctx, bdd1_p1_id} = Ops.leaf(ctx, true, IntegerBoolOps)
|
||||||
|
# {ctx, bdd1_i1_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
||||||
|
# {ctx, bdd1_n1_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
||||||
|
# {ctx, bdd1_id} = Ops.split(ctx, 10, bdd1_p1_id, bdd1_i1_id, bdd1_n1_id, IntegerBoolOps)
|
||||||
|
#
|
||||||
|
# {ctx, bdd2_p2_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
||||||
|
# {ctx, bdd2_i2_id} = Ops.leaf(ctx, true, IntegerBoolOps)
|
||||||
|
# {ctx, bdd2_n2_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
||||||
|
# {ctx, bdd2_id} = Ops.split(ctx, 20, bdd2_p2_id, bdd2_i2_id, bdd2_n2_id, IntegerBoolOps)
|
||||||
|
#
|
||||||
|
# {final_ctx, union_id} = Ops.union_bdds(ctx, bdd1_id, bdd2_id)
|
||||||
|
#
|
||||||
|
# # Expected structure: split(10, True, BDD2, False)
|
||||||
|
# {_final_ctx, expected_bdd_id} = Ops.split(final_ctx, 10, bdd1_p1_id, bdd2_id, bdd1_n1_id, IntegerBoolOps)
|
||||||
|
# assert union_id == expected_bdd_id
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# test "uses cache for repeated union operations", %{initial_ctx: ctx} do
|
||||||
|
# {ctx, a_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
||||||
|
# {ctx, b_id} = Ops.leaf(ctx, true, IntegerBoolOps)
|
||||||
|
#
|
||||||
|
# {ctx_after_first_union, _result1_id} = Ops.union_bdds(ctx, a_id, b_id)
|
||||||
|
# cache_after_first = ctx_after_first_union.bdd_store.ops_cache
|
||||||
|
#
|
||||||
|
# {ctx_after_second_union, _result2_id} = Ops.union_bdds(ctx_after_first_union, a_id, b_id)
|
||||||
|
# # The BDD store itself (nodes, next_id) should not change on a cache hit.
|
||||||
|
# # The ops_cache map reference will be the same if the result was cached.
|
||||||
|
# assert ctx_after_second_union.bdd_store.ops_cache == cache_after_first
|
||||||
|
# assert ctx_after_second_union.bdd_store.next_node_id == ctx_after_first_union.bdd_store.next_node_id
|
||||||
|
# end
|
||||||
|
# end
|
||||||
# end
|
# end
|
||||||
# {ctx_after_intern, node_id} = Ops.leaf(ctx, 10, MockIntegerOps)
|
|
||||||
# assert node_id != BDD.true_node_id() and node_id != BDD.false_node_id()
|
|
||||||
# assert BDD.get_node_data(ctx_after_intern, node_id).structure == Node.mk_leaf(10)
|
|
||||||
# Placeholder for more complex leaf types. Test is skipped.
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
describe "split/6 basic simplifications" do
|
|
||||||
test "if i_id is true, returns true_id", %{initial_ctx: ctx} do
|
|
||||||
{_p_ctx, p_id} = Ops.leaf(ctx, false, IntegerBoolOps) # dummy
|
|
||||||
{_n_ctx, n_id} = Ops.leaf(ctx, false, IntegerBoolOps) # dummy
|
|
||||||
true_id = BDD.true_node_id()
|
|
||||||
|
|
||||||
{new_ctx, result_id} = Ops.split(ctx, 10, p_id, true_id, n_id, IntegerBoolOps)
|
|
||||||
assert result_id == true_id
|
|
||||||
assert new_ctx == ctx # No new nodes or cache entries expected for this rule
|
|
||||||
end
|
|
||||||
|
|
||||||
test "if p_id == n_id and p_id == i_id, returns p_id", %{initial_ctx: ctx} do
|
|
||||||
{ctx, p_id} = BDD.get_or_intern_node(ctx, Node.mk_leaf(false), IntegerBoolOps) # some leaf
|
|
||||||
i_id = p_id
|
|
||||||
n_id = p_id
|
|
||||||
|
|
||||||
{_new_ctx, result_id} = Ops.split(ctx, 10, p_id, i_id, n_id, IntegerBoolOps)
|
|
||||||
assert result_id == p_id
|
|
||||||
# Cache might be touched if union_bdds was called, but this rule is direct.
|
|
||||||
# For p_id == i_id, it's direct.
|
|
||||||
end
|
|
||||||
|
|
||||||
test "if p_id == n_id and p_id != i_id, returns union(p_id, i_id)", %{initial_ctx: ctx} do
|
|
||||||
{ctx, p_id} = BDD.get_or_intern_node(ctx, Node.mk_leaf(false), IntegerBoolOps)
|
|
||||||
{ctx, i_id} = BDD.get_or_intern_node(ctx, Node.mk_leaf(true), IntegerBoolOps) # different leaf
|
|
||||||
n_id = p_id
|
|
||||||
|
|
||||||
# Expected union of p_id (false_leaf) and i_id (true_leaf) is true_id
|
|
||||||
# This relies on union_bdds working.
|
|
||||||
{_new_ctx, result_id} = Ops.split(ctx, 10, p_id, i_id, n_id, IntegerBoolOps)
|
|
||||||
expected_union_id = BDD.true_node_id() # Union of false_leaf and true_leaf
|
|
||||||
assert result_id == expected_union_id
|
|
||||||
end
|
|
||||||
|
|
||||||
test "interns a new split node if no simplification rule applies", %{initial_ctx: ctx} do
|
|
||||||
{ctx, p_id} = Ops.leaf(ctx, false, IntegerBoolOps) # false_node_id
|
|
||||||
{ctx, i_id} = Ops.leaf(ctx, false, IntegerBoolOps) # false_node_id
|
|
||||||
{ctx, n_id} = Ops.leaf(ctx, true, IntegerBoolOps) # true_node_id (different from p_id)
|
|
||||||
|
|
||||||
element = 20
|
|
||||||
{new_ctx, split_node_id} = Ops.split(ctx, element, p_id, i_id, n_id, IntegerBoolOps)
|
|
||||||
|
|
||||||
assert split_node_id != p_id and split_node_id != i_id and split_node_id != n_id
|
|
||||||
assert split_node_id != BDD.true_node_id() and split_node_id != BDD.false_node_id()
|
|
||||||
|
|
||||||
node_data = BDD.get_node_data(new_ctx, split_node_id)
|
|
||||||
assert node_data.structure == Node.mk_split(element, p_id, i_id, n_id)
|
|
||||||
assert node_data.ops_module == IntegerBoolOps
|
|
||||||
assert new_ctx.bdd_store.next_node_id > ctx.bdd_store.next_node_id
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
describe "union_bdds/3" do
|
|
||||||
test "A U A = A", %{initial_ctx: ctx} do
|
|
||||||
{ctx, a_id} = Ops.leaf(ctx, false, IntegerBoolOps) # false_node_id
|
|
||||||
{new_ctx, result_id} = Ops.union_bdds(ctx, a_id, a_id)
|
|
||||||
assert result_id == a_id
|
|
||||||
assert Map.has_key?(new_ctx.bdd_store.ops_cache, {:union, a_id, a_id})
|
|
||||||
end
|
|
||||||
|
|
||||||
test "A U True = True", %{initial_ctx: ctx} do
|
|
||||||
{ctx, a_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
|
||||||
true_id = BDD.true_node_id()
|
|
||||||
{_new_ctx, result_id} = Ops.union_bdds(ctx, a_id, true_id)
|
|
||||||
assert result_id == true_id
|
|
||||||
end
|
|
||||||
|
|
||||||
test "A U False = A", %{initial_ctx: ctx} do
|
|
||||||
{ctx, a_id} = Ops.leaf(ctx, true, IntegerBoolOps) # true_node_id
|
|
||||||
false_id = BDD.false_node_id()
|
|
||||||
{_new_ctx, result_id} = Ops.union_bdds(ctx, a_id, false_id)
|
|
||||||
assert result_id == a_id
|
|
||||||
end
|
|
||||||
|
|
||||||
test "union of two distinct leaves", %{initial_ctx: ctx} do
|
|
||||||
# leaf(false) U leaf(true) = leaf(true OR false) = leaf(true) -> true_node_id
|
|
||||||
{ctx, leaf_false_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
|
||||||
{ctx, leaf_true_id} = Ops.leaf(ctx, true, IntegerBoolOps) # This is BDD.true_node_id()
|
|
||||||
|
|
||||||
{_new_ctx, result_id} = Ops.union_bdds(ctx, leaf_false_id, leaf_true_id)
|
|
||||||
assert result_id == BDD.true_node_id()
|
|
||||||
end
|
|
||||||
|
|
||||||
test "union of two simple split nodes with same element", %{initial_ctx: ctx} do
|
|
||||||
# BDD1: split(10, True, False, False)
|
|
||||||
# BDD2: split(10, False, True, False)
|
|
||||||
# Union: split(10, True U False, False U True, False U False)
|
|
||||||
# = split(10, True, True, False)
|
|
||||||
|
|
||||||
true_id = BDD.true_node_id()
|
|
||||||
false_id = BDD.false_node_id()
|
|
||||||
|
|
||||||
{ctx, bdd1_id} = Ops.split(ctx, 10, true_id, false_id, false_id, IntegerBoolOps)
|
|
||||||
{ctx, bdd2_id} = Ops.split(ctx, 10, false_id, true_id, false_id, IntegerBoolOps)
|
|
||||||
|
|
||||||
{final_ctx, union_id} = Ops.union_bdds(ctx, bdd1_id, bdd2_id)
|
|
||||||
|
|
||||||
# Expected structure
|
|
||||||
{_final_ctx, expected_bdd_id} = Ops.split(final_ctx, 10, true_id, true_id, false_id, IntegerBoolOps)
|
|
||||||
assert union_id == expected_bdd_id
|
|
||||||
end
|
|
||||||
|
|
||||||
test "union of two simple split nodes with different elements (x1 < x2)", %{initial_ctx: ctx} do
|
|
||||||
# BDD1: split(10, True, False, False)
|
|
||||||
# BDD2: split(20, False, True, False)
|
|
||||||
# Union (x1 < x2): split(10, p1, i1 U BDD2, n1)
|
|
||||||
# = split(10, True, False U BDD2, False)
|
|
||||||
# = split(10, True, BDD2, False)
|
|
||||||
|
|
||||||
{ctx, bdd1_p1_id} = Ops.leaf(ctx, true, IntegerBoolOps)
|
|
||||||
{ctx, bdd1_i1_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
|
||||||
{ctx, bdd1_n1_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
|
||||||
{ctx, bdd1_id} = Ops.split(ctx, 10, bdd1_p1_id, bdd1_i1_id, bdd1_n1_id, IntegerBoolOps)
|
|
||||||
|
|
||||||
{ctx, bdd2_p2_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
|
||||||
{ctx, bdd2_i2_id} = Ops.leaf(ctx, true, IntegerBoolOps)
|
|
||||||
{ctx, bdd2_n2_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
|
||||||
{ctx, bdd2_id} = Ops.split(ctx, 20, bdd2_p2_id, bdd2_i2_id, bdd2_n2_id, IntegerBoolOps)
|
|
||||||
|
|
||||||
{final_ctx, union_id} = Ops.union_bdds(ctx, bdd1_id, bdd2_id)
|
|
||||||
|
|
||||||
# Expected structure: split(10, True, BDD2, False)
|
|
||||||
{_final_ctx, expected_bdd_id} = Ops.split(final_ctx, 10, bdd1_p1_id, bdd2_id, bdd1_n1_id, IntegerBoolOps)
|
|
||||||
assert union_id == expected_bdd_id
|
|
||||||
end
|
|
||||||
|
|
||||||
test "uses cache for repeated union operations", %{initial_ctx: ctx} do
|
|
||||||
{ctx, a_id} = Ops.leaf(ctx, false, IntegerBoolOps)
|
|
||||||
{ctx, b_id} = Ops.leaf(ctx, true, IntegerBoolOps)
|
|
||||||
|
|
||||||
{ctx_after_first_union, _result1_id} = Ops.union_bdds(ctx, a_id, b_id)
|
|
||||||
cache_after_first = ctx_after_first_union.bdd_store.ops_cache
|
|
||||||
|
|
||||||
{ctx_after_second_union, _result2_id} = Ops.union_bdds(ctx_after_first_union, a_id, b_id)
|
|
||||||
# The BDD store itself (nodes, next_id) should not change on a cache hit.
|
|
||||||
# The ops_cache map reference will be the same if the result was cached.
|
|
||||||
assert ctx_after_second_union.bdd_store.ops_cache == cache_after_first
|
|
||||||
assert ctx_after_second_union.bdd_store.next_node_id == ctx_after_first_union.bdd_store.next_node_id
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|||||||
@ -1,72 +1,72 @@
|
|||||||
defmodule Tilly.BDD.StringBoolOpsTest do
|
# defmodule Tilly.BDD.StringBoolOpsTest do
|
||||||
use ExUnit.Case, async: true
|
# use ExUnit.Case, async: true
|
||||||
|
#
|
||||||
alias Tilly.BDD.StringBoolOps
|
# alias Tilly.BDD.StringBoolOps
|
||||||
|
#
|
||||||
describe "compare_elements/2" do
|
# describe "compare_elements/2" do
|
||||||
test "correctly compares strings" do
|
# test "correctly compares strings" do
|
||||||
assert StringBoolOps.compare_elements("apple", "banana") == :lt
|
# assert StringBoolOps.compare_elements("apple", "banana") == :lt
|
||||||
assert StringBoolOps.compare_elements("banana", "apple") == :gt
|
# assert StringBoolOps.compare_elements("banana", "apple") == :gt
|
||||||
assert StringBoolOps.compare_elements("cherry", "cherry") == :eq
|
# assert StringBoolOps.compare_elements("cherry", "cherry") == :eq
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "equal_element?/2" do
|
# describe "equal_element?/2" do
|
||||||
test "correctly checks string equality" do
|
# test "correctly checks string equality" do
|
||||||
assert StringBoolOps.equal_element?("apple", "apple") == true
|
# assert StringBoolOps.equal_element?("apple", "apple") == true
|
||||||
assert StringBoolOps.equal_element?("apple", "banana") == false
|
# assert StringBoolOps.equal_element?("apple", "banana") == false
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "hash_element/1" do
|
# describe "hash_element/1" do
|
||||||
test "hashes strings consistently" do
|
# test "hashes strings consistently" do
|
||||||
assert is_integer(StringBoolOps.hash_element("foo"))
|
# assert is_integer(StringBoolOps.hash_element("foo"))
|
||||||
assert StringBoolOps.hash_element("foo") == StringBoolOps.hash_element("foo")
|
# assert StringBoolOps.hash_element("foo") == StringBoolOps.hash_element("foo")
|
||||||
assert StringBoolOps.hash_element("foo") != StringBoolOps.hash_element("bar")
|
# assert StringBoolOps.hash_element("foo") != StringBoolOps.hash_element("bar")
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "leaf operations" do
|
# describe "leaf operations" do
|
||||||
test "empty_leaf/0 returns false" do
|
# test "empty_leaf/0 returns false" do
|
||||||
assert StringBoolOps.empty_leaf() == false
|
# assert StringBoolOps.empty_leaf() == false
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "any_leaf/0 returns true" do
|
# test "any_leaf/0 returns true" do
|
||||||
assert StringBoolOps.any_leaf() == true
|
# assert StringBoolOps.any_leaf() == true
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "is_empty_leaf?/1" do
|
# test "is_empty_leaf?/1" do
|
||||||
assert StringBoolOps.is_empty_leaf?(false) == true
|
# assert StringBoolOps.is_empty_leaf?(false) == true
|
||||||
assert StringBoolOps.is_empty_leaf?(true) == false
|
# assert StringBoolOps.is_empty_leaf?(true) == false
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "union_leaves/3" do
|
# test "union_leaves/3" do
|
||||||
assert StringBoolOps.union_leaves(%{}, false, false) == false
|
# assert StringBoolOps.union_leaves(%{}, false, false) == false
|
||||||
assert StringBoolOps.union_leaves(%{}, true, false) == true
|
# assert StringBoolOps.union_leaves(%{}, true, false) == true
|
||||||
assert StringBoolOps.union_leaves(%{}, false, true) == true
|
# assert StringBoolOps.union_leaves(%{}, false, true) == true
|
||||||
assert StringBoolOps.union_leaves(%{}, true, true) == true
|
# assert StringBoolOps.union_leaves(%{}, true, true) == true
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "intersection_leaves/3" do
|
# test "intersection_leaves/3" do
|
||||||
assert StringBoolOps.intersection_leaves(%{}, false, false) == false
|
# assert StringBoolOps.intersection_leaves(%{}, false, false) == false
|
||||||
assert StringBoolOps.intersection_leaves(%{}, true, false) == false
|
# assert StringBoolOps.intersection_leaves(%{}, true, false) == false
|
||||||
assert StringBoolOps.intersection_leaves(%{}, false, true) == false
|
# assert StringBoolOps.intersection_leaves(%{}, false, true) == false
|
||||||
assert StringBoolOps.intersection_leaves(%{}, true, true) == true
|
# assert StringBoolOps.intersection_leaves(%{}, true, true) == true
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "negation_leaf/2" do
|
# test "negation_leaf/2" do
|
||||||
assert StringBoolOps.negation_leaf(%{}, false) == true
|
# assert StringBoolOps.negation_leaf(%{}, false) == true
|
||||||
assert StringBoolOps.negation_leaf(%{}, true) == false
|
# assert StringBoolOps.negation_leaf(%{}, true) == false
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "test_leaf_value/1" do
|
# describe "test_leaf_value/1" do
|
||||||
test "returns :empty for false" do
|
# test "returns :empty for false" do
|
||||||
assert StringBoolOps.test_leaf_value(false) == :empty
|
# assert StringBoolOps.test_leaf_value(false) == :empty
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "returns :full for true" do
|
# test "returns :full for true" do
|
||||||
assert StringBoolOps.test_leaf_value(true) == :full
|
# assert StringBoolOps.test_leaf_value(true) == :full
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
|||||||
@ -1,163 +1,163 @@
|
|||||||
defmodule Tilly.BDDTest do
|
# defmodule Tilly.BDDTest do
|
||||||
use ExUnit.Case, async: true
|
# use ExUnit.Case, async: true
|
||||||
|
#
|
||||||
alias Tilly.BDD.Node
|
# alias Tilly.BDD.Node
|
||||||
|
#
|
||||||
describe "init_bdd_store/1" do
|
# describe "init_bdd_store/1" do
|
||||||
test "initializes bdd_store in typing_ctx with predefined false and true nodes" do
|
# test "initializes bdd_store in typing_ctx with predefined false and true nodes" do
|
||||||
typing_ctx = %{}
|
# typing_ctx = %{}
|
||||||
new_ctx = Tilly.BDD.init_bdd_store(typing_ctx)
|
# new_ctx = Tilly.BDD.init_bdd_store(typing_ctx)
|
||||||
|
#
|
||||||
assert %{bdd_store: bdd_store} = new_ctx
|
# assert %{bdd_store: bdd_store} = new_ctx
|
||||||
assert is_map(bdd_store.nodes_by_structure)
|
# assert is_map(bdd_store.nodes_by_structure)
|
||||||
assert is_map(bdd_store.structures_by_id)
|
# assert is_map(bdd_store.structures_by_id)
|
||||||
assert bdd_store.next_node_id == 2 # 0 for false, 1 for true
|
# assert bdd_store.next_node_id == 2 # 0 for false, 1 for true
|
||||||
assert bdd_store.ops_cache == %{}
|
# assert bdd_store.ops_cache == %{}
|
||||||
|
#
|
||||||
# Check false node
|
# # Check false node
|
||||||
false_id = Tilly.BDD.false_node_id()
|
# false_id = Tilly.BDD.false_node_id()
|
||||||
false_ops_module = Tilly.BDD.universal_ops_module()
|
# false_ops_module = Tilly.BDD.universal_ops_module()
|
||||||
assert bdd_store.nodes_by_structure[{Node.mk_false(), false_ops_module}] == false_id
|
# assert bdd_store.nodes_by_structure[{Node.mk_false(), false_ops_module}] == false_id
|
||||||
assert bdd_store.structures_by_id[false_id] == %{structure: Node.mk_false(), ops_module: false_ops_module}
|
# assert bdd_store.structures_by_id[false_id] == %{structure: Node.mk_false(), ops_module: false_ops_module}
|
||||||
|
#
|
||||||
# Check true node
|
# # Check true node
|
||||||
true_id = Tilly.BDD.true_node_id()
|
# true_id = Tilly.BDD.true_node_id()
|
||||||
true_ops_module = Tilly.BDD.universal_ops_module()
|
# true_ops_module = Tilly.BDD.universal_ops_module()
|
||||||
assert bdd_store.nodes_by_structure[{Node.mk_true(), true_ops_module}] == true_id
|
# assert bdd_store.nodes_by_structure[{Node.mk_true(), true_ops_module}] == true_id
|
||||||
assert bdd_store.structures_by_id[true_id] == %{structure: Node.mk_true(), ops_module: true_ops_module}
|
# assert bdd_store.structures_by_id[true_id] == %{structure: Node.mk_true(), ops_module: true_ops_module}
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "get_or_intern_node/3" do
|
# describe "get_or_intern_node/3" do
|
||||||
setup do
|
# setup do
|
||||||
typing_ctx = Tilly.BDD.init_bdd_store(%{})
|
# typing_ctx = Tilly.BDD.init_bdd_store(%{})
|
||||||
%{initial_ctx: typing_ctx}
|
# %{initial_ctx: typing_ctx}
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "interning Node.mk_false() returns predefined false_id and doesn't change store", %{initial_ctx: ctx} do
|
# test "interning Node.mk_false() returns predefined false_id and doesn't change store", %{initial_ctx: ctx} do
|
||||||
false_ops_module = Tilly.BDD.universal_ops_module()
|
# false_ops_module = Tilly.BDD.universal_ops_module()
|
||||||
{new_ctx, node_id} = Tilly.BDD.get_or_intern_node(ctx, Node.mk_false(), false_ops_module)
|
# {new_ctx, node_id} = Tilly.BDD.get_or_intern_node(ctx, Node.mk_false(), false_ops_module)
|
||||||
assert node_id == Tilly.BDD.false_node_id()
|
# assert node_id == Tilly.BDD.false_node_id()
|
||||||
assert new_ctx.bdd_store == ctx.bdd_store
|
# assert new_ctx.bdd_store == ctx.bdd_store
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "interning Node.mk_true() returns predefined true_id and doesn't change store", %{initial_ctx: ctx} do
|
# test "interning Node.mk_true() returns predefined true_id and doesn't change store", %{initial_ctx: ctx} do
|
||||||
true_ops_module = Tilly.BDD.universal_ops_module()
|
# true_ops_module = Tilly.BDD.universal_ops_module()
|
||||||
{new_ctx, node_id} = Tilly.BDD.get_or_intern_node(ctx, Node.mk_true(), true_ops_module)
|
# {new_ctx, node_id} = Tilly.BDD.get_or_intern_node(ctx, Node.mk_true(), true_ops_module)
|
||||||
assert node_id == Tilly.BDD.true_node_id()
|
# assert node_id == Tilly.BDD.true_node_id()
|
||||||
assert new_ctx.bdd_store == ctx.bdd_store
|
# assert new_ctx.bdd_store == ctx.bdd_store
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "interning a new leaf node returns a new ID and updates the store", %{initial_ctx: ctx} do
|
# test "interning a new leaf node returns a new ID and updates the store", %{initial_ctx: ctx} do
|
||||||
leaf_structure = Node.mk_leaf("test_leaf")
|
# leaf_structure = Node.mk_leaf("test_leaf")
|
||||||
ops_mod = :my_ops
|
# ops_mod = :my_ops
|
||||||
|
#
|
||||||
{ctx_after_intern, node_id} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod)
|
# {ctx_after_intern, node_id} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod)
|
||||||
|
#
|
||||||
assert node_id == 2 # Initial next_node_id
|
# assert node_id == 2 # Initial next_node_id
|
||||||
assert ctx_after_intern.bdd_store.next_node_id == 3
|
# assert ctx_after_intern.bdd_store.next_node_id == 3
|
||||||
assert ctx_after_intern.bdd_store.nodes_by_structure[{leaf_structure, ops_mod}] == node_id
|
# assert ctx_after_intern.bdd_store.nodes_by_structure[{leaf_structure, ops_mod}] == node_id
|
||||||
assert ctx_after_intern.bdd_store.structures_by_id[node_id] == %{structure: leaf_structure, ops_module: ops_mod}
|
# assert ctx_after_intern.bdd_store.structures_by_id[node_id] == %{structure: leaf_structure, ops_module: ops_mod}
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "interning the same leaf node again returns the same ID and doesn't change store", %{initial_ctx: ctx} do
|
# test "interning the same leaf node again returns the same ID and doesn't change store", %{initial_ctx: ctx} do
|
||||||
leaf_structure = Node.mk_leaf("test_leaf")
|
# leaf_structure = Node.mk_leaf("test_leaf")
|
||||||
ops_mod = :my_ops
|
# ops_mod = :my_ops
|
||||||
|
#
|
||||||
{ctx_after_first_intern, first_node_id} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod)
|
# {ctx_after_first_intern, first_node_id} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod)
|
||||||
{ctx_after_second_intern, second_node_id} = Tilly.BDD.get_or_intern_node(ctx_after_first_intern, leaf_structure, ops_mod)
|
# {ctx_after_second_intern, second_node_id} = Tilly.BDD.get_or_intern_node(ctx_after_first_intern, leaf_structure, ops_mod)
|
||||||
|
#
|
||||||
assert first_node_id == second_node_id
|
# assert first_node_id == second_node_id
|
||||||
assert ctx_after_first_intern.bdd_store == ctx_after_second_intern.bdd_store
|
# assert ctx_after_first_intern.bdd_store == ctx_after_second_intern.bdd_store
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "interning a new split node returns a new ID and updates the store", %{initial_ctx: ctx} do
|
# test "interning a new split node returns a new ID and updates the store", %{initial_ctx: ctx} do
|
||||||
split_structure = Node.mk_split(:el, Tilly.BDD.true_node_id(), Tilly.BDD.false_node_id(), Tilly.BDD.true_node_id())
|
# split_structure = Node.mk_split(:el, Tilly.BDD.true_node_id(), Tilly.BDD.false_node_id(), Tilly.BDD.true_node_id())
|
||||||
ops_mod = :split_ops
|
# ops_mod = :split_ops
|
||||||
|
#
|
||||||
{ctx_after_intern, node_id} = Tilly.BDD.get_or_intern_node(ctx, split_structure, ops_mod)
|
# {ctx_after_intern, node_id} = Tilly.BDD.get_or_intern_node(ctx, split_structure, ops_mod)
|
||||||
|
#
|
||||||
assert node_id == 2 # Initial next_node_id
|
# assert node_id == 2 # Initial next_node_id
|
||||||
assert ctx_after_intern.bdd_store.next_node_id == 3
|
# assert ctx_after_intern.bdd_store.next_node_id == 3
|
||||||
assert ctx_after_intern.bdd_store.nodes_by_structure[{split_structure, ops_mod}] == node_id
|
# assert ctx_after_intern.bdd_store.nodes_by_structure[{split_structure, ops_mod}] == node_id
|
||||||
assert ctx_after_intern.bdd_store.structures_by_id[node_id] == %{structure: split_structure, ops_module: ops_mod}
|
# assert ctx_after_intern.bdd_store.structures_by_id[node_id] == %{structure: split_structure, ops_module: ops_mod}
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "interning structurally identical nodes with different ops_modules results in different IDs", %{initial_ctx: ctx} do
|
# test "interning structurally identical nodes with different ops_modules results in different IDs", %{initial_ctx: ctx} do
|
||||||
leaf_structure = Node.mk_leaf("shared_leaf")
|
# leaf_structure = Node.mk_leaf("shared_leaf")
|
||||||
ops_mod1 = :ops1
|
# ops_mod1 = :ops1
|
||||||
ops_mod2 = :ops2
|
# ops_mod2 = :ops2
|
||||||
|
#
|
||||||
{ctx1, id1} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod1)
|
# {ctx1, id1} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod1)
|
||||||
{_ctx2, id2} = Tilly.BDD.get_or_intern_node(ctx1, leaf_structure, ops_mod2)
|
# {_ctx2, id2} = Tilly.BDD.get_or_intern_node(ctx1, leaf_structure, ops_mod2)
|
||||||
|
#
|
||||||
assert id1 != id2
|
# assert id1 != id2
|
||||||
assert id1 == 2
|
# assert id1 == 2
|
||||||
assert id2 == 3
|
# assert id2 == 3
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "raises ArgumentError if bdd_store is not initialized" do
|
# test "raises ArgumentError if bdd_store is not initialized" do
|
||||||
assert_raise ArgumentError, ~r/BDD store not initialized/, fn ->
|
# assert_raise ArgumentError, ~r/BDD store not initialized/, fn ->
|
||||||
Tilly.BDD.get_or_intern_node(%{}, Node.mk_leaf("foo"), :ops)
|
# Tilly.BDD.get_or_intern_node(%{}, Node.mk_leaf("foo"), :ops)
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "get_node_data/2" do
|
# describe "get_node_data/2" do
|
||||||
setup do
|
# setup do
|
||||||
ctx = Tilly.BDD.init_bdd_store(%{})
|
# ctx = Tilly.BDD.init_bdd_store(%{})
|
||||||
leaf_structure = Node.mk_leaf("data")
|
# leaf_structure = Node.mk_leaf("data")
|
||||||
ops_mod = :leaf_ops
|
# ops_mod = :leaf_ops
|
||||||
{new_ctx, leaf_id_val} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod)
|
# {new_ctx, leaf_id_val} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod)
|
||||||
%{ctx: new_ctx, leaf_structure: leaf_structure, ops_mod: ops_mod, leaf_id: leaf_id_val}
|
# %{ctx: new_ctx, leaf_structure: leaf_structure, ops_mod: ops_mod, leaf_id: leaf_id_val}
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "returns correct data for false node", %{ctx: ctx} do
|
# test "returns correct data for false node", %{ctx: ctx} do
|
||||||
false_id = Tilly.BDD.false_node_id()
|
# false_id = Tilly.BDD.false_node_id()
|
||||||
false_ops_module = Tilly.BDD.universal_ops_module()
|
# false_ops_module = Tilly.BDD.universal_ops_module()
|
||||||
assert Tilly.BDD.get_node_data(ctx, false_id) == %{structure: Node.mk_false(), ops_module: false_ops_module}
|
# assert Tilly.BDD.get_node_data(ctx, false_id) == %{structure: Node.mk_false(), ops_module: false_ops_module}
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "returns correct data for true node", %{ctx: ctx} do
|
# test "returns correct data for true node", %{ctx: ctx} do
|
||||||
true_id = Tilly.BDD.true_node_id()
|
# true_id = Tilly.BDD.true_node_id()
|
||||||
true_ops_module = Tilly.BDD.universal_ops_module()
|
# true_ops_module = Tilly.BDD.universal_ops_module()
|
||||||
assert Tilly.BDD.get_node_data(ctx, true_id) == %{structure: Node.mk_true(), ops_module: true_ops_module}
|
# assert Tilly.BDD.get_node_data(ctx, true_id) == %{structure: Node.mk_true(), ops_module: true_ops_module}
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "returns correct data for a custom interned leaf node", %{ctx: ctx, leaf_structure: ls, ops_mod: om, leaf_id: id} do
|
# test "returns correct data for a custom interned leaf node", %{ctx: ctx, leaf_structure: ls, ops_mod: om, leaf_id: id} do
|
||||||
assert Tilly.BDD.get_node_data(ctx, id) == %{structure: ls, ops_module: om}
|
# assert Tilly.BDD.get_node_data(ctx, id) == %{structure: ls, ops_module: om}
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "returns nil for an unknown node ID", %{ctx: ctx} do
|
# test "returns nil for an unknown node ID", %{ctx: ctx} do
|
||||||
assert Tilly.BDD.get_node_data(ctx, 999) == nil
|
# assert Tilly.BDD.get_node_data(ctx, 999) == nil
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "returns nil if bdd_store not in ctx" do
|
# test "returns nil if bdd_store not in ctx" do
|
||||||
assert Tilly.BDD.get_node_data(%{}, 0) == nil
|
# assert Tilly.BDD.get_node_data(%{}, 0) == nil
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "is_false_node?/2 and is_true_node?/2" do
|
# describe "is_false_node?/2 and is_true_node?/2" do
|
||||||
setup do
|
# setup do
|
||||||
ctx = Tilly.BDD.init_bdd_store(%{})
|
# ctx = Tilly.BDD.init_bdd_store(%{})
|
||||||
leaf_structure = Node.mk_leaf("data")
|
# leaf_structure = Node.mk_leaf("data")
|
||||||
ops_mod = :leaf_ops
|
# ops_mod = :leaf_ops
|
||||||
{new_ctx, leaf_id_val} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod)
|
# {new_ctx, leaf_id_val} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod)
|
||||||
%{ctx: new_ctx, leaf_id: leaf_id_val}
|
# %{ctx: new_ctx, leaf_id: leaf_id_val}
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "is_false_node?/2", %{ctx: ctx, leaf_id: id} do
|
# test "is_false_node?/2", %{ctx: ctx, leaf_id: id} do
|
||||||
assert Tilly.BDD.is_false_node?(ctx, Tilly.BDD.false_node_id()) == true
|
# assert Tilly.BDD.is_false_node?(ctx, Tilly.BDD.false_node_id()) == true
|
||||||
assert Tilly.BDD.is_false_node?(ctx, Tilly.BDD.true_node_id()) == false
|
# assert Tilly.BDD.is_false_node?(ctx, Tilly.BDD.true_node_id()) == false
|
||||||
assert Tilly.BDD.is_false_node?(ctx, id) == false
|
# assert Tilly.BDD.is_false_node?(ctx, id) == false
|
||||||
assert Tilly.BDD.is_false_node?(ctx, 999) == false # Unknown ID
|
# assert Tilly.BDD.is_false_node?(ctx, 999) == false # Unknown ID
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "is_true_node?/2", %{ctx: ctx, leaf_id: id} do
|
# test "is_true_node?/2", %{ctx: ctx, leaf_id: id} do
|
||||||
assert Tilly.BDD.is_true_node?(ctx, Tilly.BDD.true_node_id()) == true
|
# assert Tilly.BDD.is_true_node?(ctx, Tilly.BDD.true_node_id()) == true
|
||||||
assert Tilly.BDD.is_true_node?(ctx, Tilly.BDD.false_node_id()) == false
|
# assert Tilly.BDD.is_true_node?(ctx, Tilly.BDD.false_node_id()) == false
|
||||||
assert Tilly.BDD.is_true_node?(ctx, id) == false
|
# assert Tilly.BDD.is_true_node?(ctx, id) == false
|
||||||
assert Tilly.BDD.is_true_node?(ctx, 999) == false # Unknown ID
|
# assert Tilly.BDD.is_true_node?(ctx, 999) == false # Unknown ID
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
|||||||
@ -1,162 +1,162 @@
|
|||||||
defmodule Tilly.Type.OpsTest do
|
# defmodule Tilly.Type.OpsTest do
|
||||||
use ExUnit.Case, async: true
|
# use ExUnit.Case, async: true
|
||||||
|
#
|
||||||
alias Tilly.BDD
|
# alias Tilly.BDD
|
||||||
alias Tilly.Type.Store
|
# alias Tilly.Type.Store
|
||||||
alias Tilly.Type.Ops
|
# alias Tilly.Type.Ops
|
||||||
|
#
|
||||||
defp init_context do
|
# defp init_context do
|
||||||
%{}
|
# %{}
|
||||||
|> BDD.init_bdd_store()
|
# |> BDD.init_bdd_store()
|
||||||
|> Store.init_type_store()
|
# |> Store.init_type_store()
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "get_type_nothing/1 and get_type_any/1" do
|
# describe "get_type_nothing/1 and get_type_any/1" do
|
||||||
test "get_type_nothing returns an interned Descr ID for the empty type" do
|
# test "get_type_nothing returns an interned Descr ID for the empty type" do
|
||||||
ctx = init_context()
|
# ctx = init_context()
|
||||||
{ctx_after_nothing, nothing_id} = Ops.get_type_nothing(ctx)
|
# {ctx_after_nothing, nothing_id} = Ops.get_type_nothing(ctx)
|
||||||
assert Ops.is_empty_type?(ctx_after_nothing, nothing_id)
|
# assert Ops.is_empty_type?(ctx_after_nothing, nothing_id)
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "get_type_any returns an interned Descr ID for the universal type" do
|
# test "get_type_any returns an interned Descr ID for the universal type" do
|
||||||
ctx = init_context()
|
# ctx = init_context()
|
||||||
{ctx_after_any, any_id} = Ops.get_type_any(ctx)
|
# {ctx_after_any, any_id} = Ops.get_type_any(ctx)
|
||||||
refute Ops.is_empty_type?(ctx_after_any, any_id)
|
# refute Ops.is_empty_type?(ctx_after_any, any_id)
|
||||||
|
#
|
||||||
# Further check: any type negated should be nothing type
|
# # Further check: any type negated should be nothing type
|
||||||
{ctx1, neg_any_id} = Ops.negation_type(ctx_after_any, any_id)
|
# {ctx1, neg_any_id} = Ops.negation_type(ctx_after_any, any_id)
|
||||||
{ctx2, nothing_id} = Ops.get_type_nothing(ctx1)
|
# {ctx2, nothing_id} = Ops.get_type_nothing(ctx1)
|
||||||
assert neg_any_id == nothing_id
|
# assert neg_any_id == nothing_id
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "literal type constructors" do
|
# describe "literal type constructors" do
|
||||||
test "create_atom_literal_type/2" do
|
# test "create_atom_literal_type/2" do
|
||||||
ctx = init_context()
|
# ctx = init_context()
|
||||||
{ctx1, atom_foo_id} = Ops.create_atom_literal_type(ctx, :foo)
|
# {ctx1, atom_foo_id} = Ops.create_atom_literal_type(ctx, :foo)
|
||||||
{ctx2, atom_bar_id} = Ops.create_atom_literal_type(ctx1, :bar)
|
# {ctx2, atom_bar_id} = Ops.create_atom_literal_type(ctx1, :bar)
|
||||||
{ctx3, atom_foo_again_id} = Ops.create_atom_literal_type(ctx2, :foo)
|
# {ctx3, atom_foo_again_id} = Ops.create_atom_literal_type(ctx2, :foo)
|
||||||
|
#
|
||||||
refute Ops.is_empty_type?(ctx3, atom_foo_id)
|
# refute Ops.is_empty_type?(ctx3, atom_foo_id)
|
||||||
refute Ops.is_empty_type?(ctx3, atom_bar_id)
|
# refute Ops.is_empty_type?(ctx3, atom_bar_id)
|
||||||
assert atom_foo_id != atom_bar_id
|
# assert atom_foo_id != atom_bar_id
|
||||||
assert atom_foo_id == atom_foo_again_id
|
# assert atom_foo_id == atom_foo_again_id
|
||||||
|
#
|
||||||
# Test intersection: (:foo & :bar) should be Nothing
|
# # Test intersection: (:foo & :bar) should be Nothing
|
||||||
{ctx4, intersection_id} = Ops.intersection_types(ctx3, atom_foo_id, atom_bar_id)
|
# {ctx4, intersection_id} = Ops.intersection_types(ctx3, atom_foo_id, atom_bar_id)
|
||||||
assert Ops.is_empty_type?(ctx4, intersection_id)
|
# assert Ops.is_empty_type?(ctx4, intersection_id)
|
||||||
|
#
|
||||||
# Test union: (:foo | :bar) should not be empty
|
# # Test union: (:foo | :bar) should not be empty
|
||||||
{ctx5, union_id} = Ops.union_types(ctx4, atom_foo_id, atom_bar_id)
|
# {ctx5, union_id} = Ops.union_types(ctx4, atom_foo_id, atom_bar_id)
|
||||||
refute Ops.is_empty_type?(ctx5, union_id)
|
# refute Ops.is_empty_type?(ctx5, union_id)
|
||||||
|
#
|
||||||
# Test negation: (not :foo) should not be empty and not be :foo
|
# # Test negation: (not :foo) should not be empty and not be :foo
|
||||||
{ctx6, not_foo_id} = Ops.negation_type(ctx5, atom_foo_id)
|
# {ctx6, not_foo_id} = Ops.negation_type(ctx5, atom_foo_id)
|
||||||
refute Ops.is_empty_type?(ctx6, not_foo_id)
|
# refute Ops.is_empty_type?(ctx6, not_foo_id)
|
||||||
{ctx7, intersection_not_foo_and_foo} = Ops.intersection_types(ctx6, atom_foo_id, not_foo_id)
|
# {ctx7, intersection_not_foo_and_foo} = Ops.intersection_types(ctx6, atom_foo_id, not_foo_id)
|
||||||
assert Ops.is_empty_type?(ctx7, intersection_not_foo_and_foo)
|
# assert Ops.is_empty_type?(ctx7, intersection_not_foo_and_foo)
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "create_integer_literal_type/2" do
|
# test "create_integer_literal_type/2" do
|
||||||
ctx = init_context()
|
# ctx = init_context()
|
||||||
{ctx1, int_1_id} = Ops.create_integer_literal_type(ctx, 1)
|
# {ctx1, int_1_id} = Ops.create_integer_literal_type(ctx, 1)
|
||||||
{ctx2, int_2_id} = Ops.create_integer_literal_type(ctx1, 2)
|
# {ctx2, int_2_id} = Ops.create_integer_literal_type(ctx1, 2)
|
||||||
|
#
|
||||||
refute Ops.is_empty_type?(ctx2, int_1_id) # Use ctx2
|
# refute Ops.is_empty_type?(ctx2, int_1_id) # Use ctx2
|
||||||
{ctx3, intersection_id} = Ops.intersection_types(ctx2, int_1_id, int_2_id)
|
# {ctx3, intersection_id} = Ops.intersection_types(ctx2, int_1_id, int_2_id)
|
||||||
assert Ops.is_empty_type?(ctx3, intersection_id)
|
# assert Ops.is_empty_type?(ctx3, intersection_id)
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "create_string_literal_type/2" do
|
# test "create_string_literal_type/2" do
|
||||||
ctx = init_context()
|
# ctx = init_context()
|
||||||
{ctx1, str_a_id} = Ops.create_string_literal_type(ctx, "a")
|
# {ctx1, str_a_id} = Ops.create_string_literal_type(ctx, "a")
|
||||||
{ctx2, str_b_id} = Ops.create_string_literal_type(ctx1, "b")
|
# {ctx2, str_b_id} = Ops.create_string_literal_type(ctx1, "b")
|
||||||
|
#
|
||||||
refute Ops.is_empty_type?(ctx2, str_a_id) # Use ctx2
|
# refute Ops.is_empty_type?(ctx2, str_a_id) # Use ctx2
|
||||||
{ctx3, intersection_id} = Ops.intersection_types(ctx2, str_a_id, str_b_id)
|
# {ctx3, intersection_id} = Ops.intersection_types(ctx2, str_a_id, str_b_id)
|
||||||
assert Ops.is_empty_type?(ctx3, intersection_id)
|
# assert Ops.is_empty_type?(ctx3, intersection_id)
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "primitive type constructors (any_of_kind)" do
|
# describe "primitive type constructors (any_of_kind)" do
|
||||||
test "get_primitive_type_any_atom/1" do
|
# test "get_primitive_type_any_atom/1" do
|
||||||
ctx = init_context()
|
# ctx = init_context()
|
||||||
{ctx1, any_atom_id} = Ops.get_primitive_type_any_atom(ctx)
|
# {ctx1, any_atom_id} = Ops.get_primitive_type_any_atom(ctx)
|
||||||
{ctx2, atom_foo_id} = Ops.create_atom_literal_type(ctx1, :foo)
|
# {ctx2, atom_foo_id} = Ops.create_atom_literal_type(ctx1, :foo)
|
||||||
|
#
|
||||||
refute Ops.is_empty_type?(ctx2, any_atom_id)
|
# refute Ops.is_empty_type?(ctx2, any_atom_id)
|
||||||
# :foo should be a subtype of AnyAtom (i.e., :foo INTERSECTION (NEGATION AnyAtom) == Empty)
|
# # :foo should be a subtype of AnyAtom (i.e., :foo INTERSECTION (NEGATION AnyAtom) == Empty)
|
||||||
# Or, :foo UNION AnyAtom == AnyAtom
|
# # Or, :foo UNION AnyAtom == AnyAtom
|
||||||
# Or, :foo INTERSECTION AnyAtom == :foo
|
# # Or, :foo INTERSECTION AnyAtom == :foo
|
||||||
{ctx3, intersection_foo_any_atom_id} = Ops.intersection_types(ctx2, atom_foo_id, any_atom_id)
|
# {ctx3, intersection_foo_any_atom_id} = Ops.intersection_types(ctx2, atom_foo_id, any_atom_id)
|
||||||
assert intersection_foo_any_atom_id == atom_foo_id # Check it simplifies to :foo
|
# assert intersection_foo_any_atom_id == atom_foo_id # Check it simplifies to :foo
|
||||||
|
#
|
||||||
# Test original subtype logic: (:foo & (not AnyAtom)) == Empty
|
# # Test original subtype logic: (:foo & (not AnyAtom)) == Empty
|
||||||
{ctx4, not_any_atom_id} = Ops.negation_type(ctx3, any_atom_id) # Use ctx3
|
# {ctx4, not_any_atom_id} = Ops.negation_type(ctx3, any_atom_id) # Use ctx3
|
||||||
{ctx5, intersection_subtype_check_id} = Ops.intersection_types(ctx4, atom_foo_id, not_any_atom_id)
|
# {ctx5, intersection_subtype_check_id} = Ops.intersection_types(ctx4, atom_foo_id, not_any_atom_id)
|
||||||
assert Ops.is_empty_type?(ctx5, intersection_subtype_check_id)
|
# assert Ops.is_empty_type?(ctx5, intersection_subtype_check_id)
|
||||||
|
#
|
||||||
# AnyAtom & AnyInteger should be Empty
|
# # AnyAtom & AnyInteger should be Empty
|
||||||
{ctx6, any_integer_id} = Ops.get_primitive_type_any_integer(ctx5) # Use ctx5
|
# {ctx6, any_integer_id} = Ops.get_primitive_type_any_integer(ctx5) # Use ctx5
|
||||||
{ctx7, atom_int_intersect_id} = Ops.intersection_types(ctx6, any_atom_id, any_integer_id)
|
# {ctx7, atom_int_intersect_id} = Ops.intersection_types(ctx6, any_atom_id, any_integer_id)
|
||||||
assert Ops.is_empty_type?(ctx7, atom_int_intersect_id)
|
# assert Ops.is_empty_type?(ctx7, atom_int_intersect_id)
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "union_types, intersection_types, negation_type" do
|
# describe "union_types, intersection_types, negation_type" do
|
||||||
test "basic set properties" do
|
# test "basic set properties" do
|
||||||
ctx0 = init_context()
|
# ctx0 = init_context()
|
||||||
{ctx1, type_a_id} = Ops.create_atom_literal_type(ctx0, :a)
|
# {ctx1, type_a_id} = Ops.create_atom_literal_type(ctx0, :a)
|
||||||
{ctx2, type_b_id} = Ops.create_atom_literal_type(ctx1, :b)
|
# {ctx2, type_b_id} = Ops.create_atom_literal_type(ctx1, :b)
|
||||||
{ctx3, type_c_id} = Ops.create_atom_literal_type(ctx2, :c)
|
# {ctx3, type_c_id} = Ops.create_atom_literal_type(ctx2, :c)
|
||||||
{ctx4, nothing_id} = Ops.get_type_nothing(ctx3)
|
# {ctx4, nothing_id} = Ops.get_type_nothing(ctx3)
|
||||||
|
#
|
||||||
# A | Nothing = A
|
# # A | Nothing = A
|
||||||
{ctx5, union_a_nothing_id} = Ops.union_types(ctx4, type_a_id, nothing_id)
|
# {ctx5, union_a_nothing_id} = Ops.union_types(ctx4, type_a_id, nothing_id)
|
||||||
assert union_a_nothing_id == type_a_id
|
# assert union_a_nothing_id == type_a_id
|
||||||
|
#
|
||||||
# A & Nothing = Nothing
|
# # A & Nothing = Nothing
|
||||||
{ctx6, intersect_a_nothing_id} = Ops.intersection_types(ctx5, type_a_id, nothing_id)
|
# {ctx6, intersect_a_nothing_id} = Ops.intersection_types(ctx5, type_a_id, nothing_id)
|
||||||
assert intersect_a_nothing_id == nothing_id
|
# assert intersect_a_nothing_id == nothing_id
|
||||||
|
#
|
||||||
# not (not A) = A
|
# # not (not A) = A
|
||||||
{ctx7, not_a_id} = Ops.negation_type(ctx6, type_a_id)
|
# {ctx7, not_a_id} = Ops.negation_type(ctx6, type_a_id)
|
||||||
{ctx8, not_not_a_id} = Ops.negation_type(ctx7, not_a_id)
|
# {ctx8, not_not_a_id} = Ops.negation_type(ctx7, not_a_id)
|
||||||
assert not_not_a_id == type_a_id
|
# assert not_not_a_id == type_a_id
|
||||||
|
#
|
||||||
# A | B
|
# # A | B
|
||||||
{ctx9, union_ab_id} = Ops.union_types(ctx8, type_a_id, type_b_id)
|
# {ctx9, union_ab_id} = Ops.union_types(ctx8, type_a_id, type_b_id)
|
||||||
# (A | B) & A = A
|
# # (A | B) & A = A
|
||||||
{ctx10, intersect_union_a_id} = Ops.intersection_types(ctx9, union_ab_id, type_a_id)
|
# {ctx10, intersect_union_a_id} = Ops.intersection_types(ctx9, union_ab_id, type_a_id)
|
||||||
assert intersect_union_a_id == type_a_id
|
# assert intersect_union_a_id == type_a_id
|
||||||
|
#
|
||||||
# (A | B) & C = Nothing (if A, B, C are distinct atom literals)
|
# # (A | B) & C = Nothing (if A, B, C are distinct atom literals)
|
||||||
{ctx11, intersect_union_c_id} = Ops.intersection_types(ctx10, union_ab_id, type_c_id)
|
# {ctx11, intersect_union_c_id} = Ops.intersection_types(ctx10, union_ab_id, type_c_id)
|
||||||
assert Ops.is_empty_type?(ctx11, intersect_union_c_id)
|
# assert Ops.is_empty_type?(ctx11, intersect_union_c_id)
|
||||||
|
#
|
||||||
# Commutativity and idempotence of union/intersection are implicitly tested by caching
|
# # Commutativity and idempotence of union/intersection are implicitly tested by caching
|
||||||
# and canonical key generation in apply_type_op.
|
# # and canonical key generation in apply_type_op.
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "type operations are cached" do
|
# test "type operations are cached" do
|
||||||
ctx0 = init_context()
|
# ctx0 = init_context()
|
||||||
{ctx1, type_a_id} = Ops.create_atom_literal_type(ctx0, :a)
|
# {ctx1, type_a_id} = Ops.create_atom_literal_type(ctx0, :a)
|
||||||
{ctx2, type_b_id} = Ops.create_atom_literal_type(ctx1, :b)
|
# {ctx2, type_b_id} = Ops.create_atom_literal_type(ctx1, :b)
|
||||||
|
#
|
||||||
# Perform an operation
|
# # Perform an operation
|
||||||
{ctx3, union1_id} = Ops.union_types(ctx2, type_a_id, type_b_id)
|
# {ctx3, union1_id} = Ops.union_types(ctx2, type_a_id, type_b_id)
|
||||||
initial_cache_size = map_size(ctx3.type_store.ops_cache)
|
# initial_cache_size = map_size(ctx3.type_store.ops_cache)
|
||||||
assert initial_cache_size > 0 # Ensure something was cached
|
# assert initial_cache_size > 0 # Ensure something was cached
|
||||||
|
#
|
||||||
# Perform the same operation again
|
# # Perform the same operation again
|
||||||
{ctx4, union2_id} = Ops.union_types(ctx3, type_a_id, type_b_id)
|
# {ctx4, union2_id} = Ops.union_types(ctx3, type_a_id, type_b_id)
|
||||||
assert union1_id == union2_id
|
# assert union1_id == union2_id
|
||||||
assert map_size(ctx4.type_store.ops_cache) == initial_cache_size # Cache size should not change
|
# assert map_size(ctx4.type_store.ops_cache) == initial_cache_size # Cache size should not change
|
||||||
|
#
|
||||||
# Perform with swapped arguments (commutative)
|
# # Perform with swapped arguments (commutative)
|
||||||
{ctx5, union3_id} = Ops.union_types(ctx4, type_b_id, type_a_id)
|
# {ctx5, union3_id} = Ops.union_types(ctx4, type_b_id, type_a_id)
|
||||||
assert union1_id == union3_id
|
# assert union1_id == union3_id
|
||||||
assert map_size(ctx5.type_store.ops_cache) == initial_cache_size # Cache size should not change
|
# assert map_size(ctx5.type_store.ops_cache) == initial_cache_size # Cache size should not change
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
|||||||
@ -1,67 +1,67 @@
|
|||||||
defmodule Tilly.Type.StoreTest do
|
# defmodule Tilly.Type.StoreTest do
|
||||||
use ExUnit.Case, async: true
|
# use ExUnit.Case, async: true
|
||||||
|
#
|
||||||
alias Tilly.BDD
|
# alias Tilly.BDD
|
||||||
alias Tilly.Type
|
# alias Tilly.Type
|
||||||
alias Tilly.Type.Store
|
# alias Tilly.Type.Store
|
||||||
|
#
|
||||||
defp init_context do
|
# defp init_context do
|
||||||
%{}
|
# %{}
|
||||||
|> BDD.init_bdd_store()
|
# |> BDD.init_bdd_store()
|
||||||
|> Store.init_type_store()
|
# |> Store.init_type_store()
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "init_type_store/1" do
|
# describe "init_type_store/1" do
|
||||||
test "initializes an empty type store in the typing_ctx" do
|
# test "initializes an empty type store in the typing_ctx" do
|
||||||
typing_ctx = %{}
|
# typing_ctx = %{}
|
||||||
new_ctx = Store.init_type_store(typing_ctx)
|
# new_ctx = Store.init_type_store(typing_ctx)
|
||||||
type_store = Map.get(new_ctx, :type_store)
|
# type_store = Map.get(new_ctx, :type_store)
|
||||||
|
#
|
||||||
assert type_store.descrs_by_structure == %{}
|
# assert type_store.descrs_by_structure == %{}
|
||||||
assert type_store.structures_by_id == %{}
|
# assert type_store.structures_by_id == %{}
|
||||||
assert type_store.next_descr_id == 0
|
# assert type_store.next_descr_id == 0
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "get_or_intern_descr/2 and get_descr_by_id/2" do
|
# describe "get_or_intern_descr/2 and get_descr_by_id/2" do
|
||||||
test "interns a new Descr map and retrieves it" do
|
# test "interns a new Descr map and retrieves it" do
|
||||||
typing_ctx = init_context()
|
# typing_ctx = init_context()
|
||||||
descr_map1 = Type.empty_descr(typing_ctx) # Uses canonical BDD.false_node_id()
|
# descr_map1 = Type.empty_descr(typing_ctx) # Uses canonical BDD.false_node_id()
|
||||||
|
#
|
||||||
# Intern first time
|
# # Intern first time
|
||||||
{ctx1, id1} = Store.get_or_intern_descr(typing_ctx, descr_map1)
|
# {ctx1, id1} = Store.get_or_intern_descr(typing_ctx, descr_map1)
|
||||||
assert id1 == 0
|
# assert id1 == 0
|
||||||
assert Store.get_descr_by_id(ctx1, id1) == descr_map1
|
# assert Store.get_descr_by_id(ctx1, id1) == descr_map1
|
||||||
assert ctx1.type_store.next_descr_id == 1
|
# assert ctx1.type_store.next_descr_id == 1
|
||||||
|
#
|
||||||
# Retrieve existing
|
# # Retrieve existing
|
||||||
{ctx2, id1_retrieved} = Store.get_or_intern_descr(ctx1, descr_map1)
|
# {ctx2, id1_retrieved} = Store.get_or_intern_descr(ctx1, descr_map1)
|
||||||
assert id1_retrieved == id1
|
# assert id1_retrieved == id1
|
||||||
assert ctx2 == ctx1 # Context should not change if already interned
|
# assert ctx2 == ctx1 # Context should not change if already interned
|
||||||
|
#
|
||||||
# Intern a different Descr map
|
# # Intern a different Descr map
|
||||||
descr_map2 = Type.any_descr(typing_ctx) # Uses canonical BDD.true_node_id()
|
# descr_map2 = Type.any_descr(typing_ctx) # Uses canonical BDD.true_node_id()
|
||||||
{ctx3, id2} = Store.get_or_intern_descr(ctx2, descr_map2)
|
# {ctx3, id2} = Store.get_or_intern_descr(ctx2, descr_map2)
|
||||||
assert id2 == 1
|
# assert id2 == 1
|
||||||
assert Store.get_descr_by_id(ctx3, id2) == descr_map2
|
# assert Store.get_descr_by_id(ctx3, id2) == descr_map2
|
||||||
assert ctx3.type_store.next_descr_id == 2
|
# assert ctx3.type_store.next_descr_id == 2
|
||||||
|
#
|
||||||
# Ensure original is still retrievable
|
# # Ensure original is still retrievable
|
||||||
assert Store.get_descr_by_id(ctx3, id1) == descr_map1
|
# assert Store.get_descr_by_id(ctx3, id1) == descr_map1
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "get_descr_by_id returns nil for non-existent ID" do
|
# test "get_descr_by_id returns nil for non-existent ID" do
|
||||||
typing_ctx = init_context()
|
# typing_ctx = init_context()
|
||||||
assert Store.get_descr_by_id(typing_ctx, 999) == nil
|
# assert Store.get_descr_by_id(typing_ctx, 999) == nil
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
test "raises an error if type store is not initialized" do
|
# test "raises an error if type store is not initialized" do
|
||||||
uninitialized_ctx = %{}
|
# uninitialized_ctx = %{}
|
||||||
descr_map = Type.empty_descr(uninitialized_ctx)
|
# descr_map = Type.empty_descr(uninitialized_ctx)
|
||||||
|
#
|
||||||
assert_raise ArgumentError,
|
# assert_raise ArgumentError,
|
||||||
"Type store not initialized in typing_ctx. Call init_type_store first.",
|
# "Type store not initialized in typing_ctx. Call init_type_store first.",
|
||||||
fn -> Store.get_or_intern_descr(uninitialized_ctx, descr_map) end
|
# fn -> Store.get_or_intern_descr(uninitialized_ctx, descr_map) end
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
|||||||
@ -1,39 +1,39 @@
|
|||||||
defmodule Tilly.TypeTest do
|
# defmodule Tilly.TypeTest do
|
||||||
use ExUnit.Case, async: true
|
# use ExUnit.Case, async: true
|
||||||
|
#
|
||||||
alias Tilly.BDD
|
# alias Tilly.BDD
|
||||||
alias Tilly.Type
|
# alias Tilly.Type
|
||||||
|
#
|
||||||
describe "empty_descr/1" do
|
# describe "empty_descr/1" do
|
||||||
test "returns a Descr map with all BDD IDs pointing to false" do
|
# test "returns a Descr map with all BDD IDs pointing to false" do
|
||||||
typing_ctx = BDD.init_bdd_store(%{})
|
# typing_ctx = BDD.init_bdd_store(%{})
|
||||||
descr = Type.empty_descr(typing_ctx)
|
# descr = Type.empty_descr(typing_ctx)
|
||||||
false_id = BDD.false_node_id()
|
# false_id = BDD.false_node_id()
|
||||||
|
#
|
||||||
assert descr.atoms_bdd_id == false_id
|
# assert descr.atoms_bdd_id == false_id
|
||||||
assert descr.integers_bdd_id == false_id
|
# assert descr.integers_bdd_id == false_id
|
||||||
assert descr.strings_bdd_id == false_id
|
# assert descr.strings_bdd_id == false_id
|
||||||
assert descr.pairs_bdd_id == false_id
|
# assert descr.pairs_bdd_id == false_id
|
||||||
assert descr.records_bdd_id == false_id
|
# assert descr.records_bdd_id == false_id
|
||||||
assert descr.functions_bdd_id == false_id
|
# assert descr.functions_bdd_id == false_id
|
||||||
assert descr.absent_marker_bdd_id == false_id
|
# assert descr.absent_marker_bdd_id == false_id
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
#
|
||||||
describe "any_descr/1" do
|
# describe "any_descr/1" do
|
||||||
test "returns a Descr map with BDD IDs pointing to true (and absent_marker to false)" do
|
# test "returns a Descr map with BDD IDs pointing to true (and absent_marker to false)" do
|
||||||
typing_ctx = BDD.init_bdd_store(%{})
|
# typing_ctx = BDD.init_bdd_store(%{})
|
||||||
descr = Type.any_descr(typing_ctx)
|
# descr = Type.any_descr(typing_ctx)
|
||||||
true_id = BDD.true_node_id()
|
# true_id = BDD.true_node_id()
|
||||||
false_id = BDD.false_node_id()
|
# false_id = BDD.false_node_id()
|
||||||
|
#
|
||||||
assert descr.atoms_bdd_id == true_id
|
# assert descr.atoms_bdd_id == true_id
|
||||||
assert descr.integers_bdd_id == true_id
|
# assert descr.integers_bdd_id == true_id
|
||||||
assert descr.strings_bdd_id == true_id
|
# assert descr.strings_bdd_id == true_id
|
||||||
assert descr.pairs_bdd_id == true_id
|
# assert descr.pairs_bdd_id == true_id
|
||||||
assert descr.records_bdd_id == true_id
|
# assert descr.records_bdd_id == true_id
|
||||||
assert descr.functions_bdd_id == true_id
|
# assert descr.functions_bdd_id == true_id
|
||||||
assert descr.absent_marker_bdd_id == false_id
|
# assert descr.absent_marker_bdd_id == false_id
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
end
|
# end
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user