diff --git a/lib/debug.ex b/lib/debug.ex
new file mode 100644
index 0000000..8776ca8
--- /dev/null
+++ b/lib/debug.ex
@@ -0,0 +1,688 @@
defmodule Tdd.Debug.TracerData do
  @moduledoc """
  Provides functions to sanitize Elixir terms for tracing and JSON serialization.

  Terms are truncated at a fixed nesting depth, and values that have no JSON
  representation (pids, ports, references, functions) are replaced with string
  placeholders.
  """

  # Maximum nesting depth before collections are replaced by "...trimmed..." markers.
  @max_sanitize_depth 5

  @doc "Sanitizes every element of a list (typically a function's argument list)."
  def sanitize_values(list) when is_list(list) do
    Enum.map(list, &sanitize_value/1)
  end

  @doc "Sanitizes a single term down to `@max_sanitize_depth` levels of nesting."
  def sanitize_value(val) do
    do_sanitize(val, @max_sanitize_depth)
  end

  # Scalars are JSON-safe as-is. `nil` and booleans are atoms, so this clause
  # covers them too (the original had a redundant, unreachable `nil` clause and
  # a redundant `is_boolean` guard).
  defp do_sanitize(val, _depth_limit)
       when is_integer(val) or is_float(val) or is_atom(val),
       do: val

  # Assume strings are fine
  defp do_sanitize(val, _depth_limit) when is_binary(val), do: val

  # Runtime-only identifiers cannot be serialized; keep their textual form.
  defp do_sanitize(val, _depth_limit) when is_pid(val), do: inspect(val)
  defp do_sanitize(val, _depth_limit) when is_port(val), do: inspect(val)
  defp do_sanitize(val, _depth_limit) when is_reference(val), do: inspect(val)

  # FIX: the original computed `arity` and branched on the function name but
  # returned an empty string from every branch, discarding all information.
  defp do_sanitize(val, _depth_limit) when is_function(val) do
    try do
      {:arity, arity} = Function.info(val, :arity)

      case Function.info(val, :name) do
        {:name, name} when is_atom(name) and name != :"" ->
          "#Function<#{name}/#{arity}>"

        _ ->
          "#Function<anonymous/#{arity}>"
      end
    catch
      # Function.info/2 may fail on exotic funs; degrade gracefully.
      _, _ -> "#Function<unknown>"
    end
  end

  # FIX: calendar structs must be handled *before* the generic map clause —
  # structs are maps, so the original fallback clause that special-cased
  # DateTime/Date/Time/NaiveDateTime was unreachable.
  defp do_sanitize(%mod{} = val, _depth_limit)
       when mod in [DateTime, Date, Time, NaiveDateTime],
       do: inspect(val)

  defp do_sanitize(val, depth_limit) when is_list(val) do
    if depth_limit <= 0 do
      "[...trimmed_list...]"
    else
      Enum.map(val, &do_sanitize(&1, depth_limit - 1))
    end
  end

  defp do_sanitize(val, depth_limit) when is_map(val) do
    if depth_limit <= 0 do
      "%{...trimmed_map...}"
    else
      if Map.has_key?(val, :__struct__) do
        module = val.__struct__
        # Sanitize only exposed fields, not internal :__meta__ or the :__struct__ key itself
        data_to_sanitize = val |> Map.delete(:__struct__) |> Map.delete(:__meta__)

        sanitized_fields =
          data_to_sanitize
          |> Enum.map(fn {k, v} -> {k, do_sanitize(v, depth_limit - 1)} end)
          |> Enum.into(%{})

        %{type: :struct, module: module, fields: sanitized_fields}
      else
        # Regular map: sanitize both keys and values.
        val
        |> Enum.map(fn {k, v} ->
          {do_sanitize(k, depth_limit - 1), do_sanitize(v, depth_limit - 1)}
        end)
        |> Enum.into(%{})
      end
    end
  end

  defp do_sanitize(val, depth_limit) when is_tuple(val) do
    if depth_limit <= 0 do
      "{...trimmed_tuple...}"
    else
      val |> Tuple.to_list() |> Enum.map(&do_sanitize(&1, depth_limit - 1)) |> List.to_tuple()
    end
  end

  # Fallback for anything not matched above (e.g. non-binary bitstrings).
  defp do_sanitize(val, _depth_limit) do
    # Converts to an inspectable string as a last resort.
    inspect(val)
  end

  @doc """
  Builds a JSON-friendly map describing an exception and at most 15 formatted
  frames of its stacktrace.
  """
  def sanitize_error(exception_instance, stacktrace) do
    %{
      type: :error,
      class: Atom.to_string(exception_instance.__struct__),
      message: Exception.message(exception_instance),
      # Sanitize stacktrace to keep it manageable
      stacktrace: Enum.map(stacktrace, &Exception.format_stacktrace_entry/1) |> Enum.take(15)
    }
  end
end

defmodule Tdd.Debug do
  @moduledoc """
  Provides macros to wrap `def` and `defp` for function call/return tracing.
  Logs arguments and return values using `IO.inspect`.
  Builds an in-memory call tree data structure per traced process.
+ """ + + # --- Agent for Tracing State --- + @agent_name Tdd.Debug.StateAgent + + def init_agent_if_needed do + case Process.whereis(@agent_name) do + nil -> Agent.start_link(fn -> MapSet.new() end, name: @agent_name) + _pid -> :ok + end + + :ok + end + + def add_traced_pid(pid) when is_pid(pid) do + init_agent_if_needed() + Agent.update(@agent_name, &MapSet.put(&1, pid)) + end + + def remove_traced_pid(pid) when is_pid(pid) do + case Process.whereis(@agent_name) do + nil -> :ok + agent_pid -> Agent.cast(agent_pid, fn state -> MapSet.delete(state, pid) end) + end + end + + def is_pid_traced?(pid) when is_pid(pid) do + case Process.whereis(@agent_name) do + nil -> + false + + agent_pid -> + try do + Agent.get(agent_pid, &MapSet.member?(&1, pid), :infinity) + rescue + # Agent might have died + _e in [Exit, ArgumentError] -> false + end + end + end + + # --- Tracing Data Storage --- + @doc """ + Initializes the tracing data structures in the process dictionary. + """ + def init_trace_storage do + Process.put(:tdd_debug_call_stack, []) + Process.put(:tdd_debug_session_roots, []) + :ok + end + + @doc """ + Retrieves the collected trace data (a list of root call tree nodes) + for the current process and clears it from the process dictionary. + Children within each node and the root list itself are reversed to be in chronological order. 
+ """ + def get_and_clear_trace_data do + data = Process.get(:tdd_debug_session_roots, []) + # Reverse children recursively and then reverse the list of roots + processed_data = Enum.map(data, &reverse_children_recursively/1) |> Enum.reverse() + + Process.delete(:tdd_debug_session_roots) + Process.delete(:tdd_debug_call_stack) + processed_data + end + + defp reverse_children_recursively(node) do + if !node.children or node.children == [] do + node + else + reversed_and_processed_children = + Enum.map(node.children, &reverse_children_recursively/1) + # Children were added prepending, so reverse for chronological + |> Enum.reverse() + + %{node | children: reversed_and_processed_children} + end + end + + # --- Tracing Control Functions --- + @doc "Enables function call tracing for the current process." + def enable_tracing do + pid_to_trace = self() + add_traced_pid(pid_to_trace) + # Initialize storage for call trees + init_trace_storage() + + ref = Process.monitor(pid_to_trace) + + Process.spawn( + fn -> + receive do + {:DOWN, ^ref, :process, ^pid_to_trace, _reason} -> + remove_traced_pid(pid_to_trace) + after + # 1 hour safety timeout + 3_600_000 -> + remove_traced_pid(pid_to_trace) + end + end, + # Changed from [:monitor] to [] as monitor is implicit with Process.monitor + [:monitor] + ) + + :ok + end + + @doc "Disables function call tracing for the current process." + def disable_tracing do + remove_traced_pid(self()) + # Note: Does not clear trace data, get_and_clear_trace_data() does that. + :ok + end + + @doc """ + Runs the given 0-arity function with tracing enabled. + Returns a tuple: `{{:ok, result} | {:error, {exception, stacktrace}}, trace_data}`. + Trace data is a list of call tree root nodes. 
+ """ + def run(fun) when is_function(fun, 0) do + enable_tracing() + + outcome = + try do + {:ok, fun.()} + rescue + kind -> {:error, {kind, __STACKTRACE__}} + end + + trace_data = get_and_clear_trace_data() + disable_tracing() + # trace_data + # |> IO.inspect() + binary = JSON.encode!(trace_data) + File.write("trace.json", binary) + {outcome, trace_data} + end + + # --- Process Dictionary for Call Depth (used for printing indent) --- + defp get_depth, do: Process.get(:tdd_debug_depth, 0) + + def increment_depth do + new_depth = get_depth() + 1 + Process.put(:tdd_debug_depth, new_depth) + new_depth + end + + def decrement_depth do + new_depth = max(0, get_depth() - 1) + Process.put(:tdd_debug_depth, new_depth) + new_depth + end + + # --- Core Macro Logic --- + defmacro __using__(_opts) do + quote do + import Kernel, except: [def: 1, def: 2, defp: 1, defp: 2] + # Import this module's public functions/macros + import Tdd.Debug + # Ensure this module is compiled for macros + require Tdd.Debug + end + end + + @doc false + defmacro def(call, clauses \\ Keyword.new()) do + generate_traced_function(:def, call, clauses, __CALLER__) + end + + @doc false + defmacro defp(call, clauses \\ Keyword.new()) do + generate_traced_function(:defp, call, clauses, __CALLER__) + end + + defp is_simple_variable_ast?(ast_node) do + case ast_node do + {var_name, _meta, context_module} + when is_atom(var_name) and (context_module == nil or is_atom(context_module)) -> + # Ensure it's not the underscore atom itself + var_name != :_ + + _ -> + false + end + end + + defp generate_traced_function(type, call_ast, clauses, caller_env) do + require Macro + + {function_name_ast, meta_call, original_args_patterns_ast_nullable} = call_ast + original_args_patterns_ast_list = original_args_patterns_ast_nullable || [] + + original_body_ast = + if Keyword.keyword?(clauses) do + Keyword.get(clauses, :do, clauses) + else + clauses + end || quote(do: nil) + + # --- Argument Handling for Logging (Existing 
Logic) --- + mapped_and_generated_vars_tuples = + Enum.map(Enum.with_index(original_args_patterns_ast_list), fn {original_pattern_ast, index} -> + td_arg_var = Macro.var(String.to_atom("__td_arg_#{index}__"), nil) + + {final_pattern_for_head, rhs_for_td_arg_assignment} = + case original_pattern_ast do + {:_, _, _} = underscore_ast -> + {underscore_ast, quote(do: :__td_ignored_argument__)} + + {:=, _meta_assign, [lhs_of_assign, _rhs_of_assign]} = assignment_pattern_ast -> + if is_simple_variable_ast?(lhs_of_assign) do + {assignment_pattern_ast, lhs_of_assign} + else + captured_val_var = Macro.unique_var(String.to_atom("tdc_assign_#{index}"), Elixir) + + new_head_pattern = + quote do + unquote(captured_val_var) = unquote(assignment_pattern_ast) + end + + {new_head_pattern, captured_val_var} + end + + {:\\, _meta_default, [pattern_before_default, _default_value_ast]} = + default_arg_pattern_ast -> + cond do + is_simple_variable_ast?(pattern_before_default) -> + {default_arg_pattern_ast, pattern_before_default} + + match?({:=, _, [lhs_inner_assign, _]}, pattern_before_default) and + is_simple_variable_ast?(pattern_before_default |> elem(2) |> Enum.at(0)) -> + {:=, _, [lhs_inner_assign, _]} = pattern_before_default + {default_arg_pattern_ast, lhs_inner_assign} + + true -> + captured_val_var = Macro.unique_var(String.to_atom("tdc_def_#{index}"), Elixir) + + new_head_pattern = + quote do + unquote(captured_val_var) = unquote(default_arg_pattern_ast) + end + + {new_head_pattern, captured_val_var} + end + + ast_node -> + if is_simple_variable_ast?(ast_node) do + {ast_node, ast_node} + else + captured_val_var = Macro.unique_var(String.to_atom("tdc_pat_#{index}"), Elixir) + + new_head_pattern = + quote do + unquote(captured_val_var) = unquote(ast_node) + end + + {new_head_pattern, captured_val_var} + end + end + + assignment_ast = + quote do + unquote(td_arg_var) = unquote(rhs_for_td_arg_assignment) + end + + {final_pattern_for_head, assignment_ast, td_arg_var} + end) + + 
{new_args_patterns_for_head_list, assignments_for_logging_vars_ast_list, + generated_vars_to_log_asts} = + if mapped_and_generated_vars_tuples == [] do + {[], [], []} + else + # Transpose list of 3-element tuples into 3 lists + list_of_lists = Enum.map(mapped_and_generated_vars_tuples, &Tuple.to_list(&1)) + + [ + Enum.map(list_of_lists, &List.first(&1)), + Enum.map(list_of_lists, &Enum.at(&1, 1)), + Enum.map(list_of_lists, &Enum.at(&1, 2)) + ] + |> then(fn [a, b, c] -> {a, b, c} end) + end + + new_call_ast = {function_name_ast, meta_call, new_args_patterns_for_head_list} + + # --- Variables for Runtime Info --- + # These vars will hold string versions of module/function name and arity at runtime + # Hygienic vars to avoid collisions. + module_name_runtime_var = Macro.var(:__td_module_name__, __MODULE__) + printable_fn_name_runtime_var = Macro.var(:__td_printable_fn_name__, __MODULE__) + arity_runtime_var = Macro.var(:__td_arity__, __MODULE__) + + # Arity calculation at macro expansion time + arity_value = length(original_args_patterns_ast_list) + + traced_body_inner_ast = + quote do + # --- Resolve Module/Function/Arity Info at Runtime --- + unquote(module_name_runtime_var) = Atom.to_string(unquote(caller_env.module)) + unquote(arity_runtime_var) = unquote(arity_value) + + # Resolve var if func name is from a var + runtime_resolved_fn_name_val = unquote(function_name_ast) + + unquote(printable_fn_name_runtime_var) = + if is_atom(runtime_resolved_fn_name_val) do + Atom.to_string(runtime_resolved_fn_name_val) + else + # For unquote(var) or operators + Macro.to_string(runtime_resolved_fn_name_val) + end + + # --- Main Tracing Logic --- + if Tdd.Debug.is_pid_traced?(self()) do + # --- On Entry: Prepare Data & Log --- + # Assign __td_arg_N__ vars + unquote_splicing(assignments_for_logging_vars_ast_list) + runtime_arg_values_for_node_and_log = [unquote_splicing(generated_vars_to_log_asts)] + + # sanitized_args_for_node = 
Tdd.Debug.TracerData.sanitize_values(runtime_arg_values_for_node_and_log) + sanitized_args_for_node = inspect(runtime_arg_values_for_node_and_log) + # For print indent & node depth + current_call_print_depth = Tdd.Debug.increment_depth() + + # Create placeholder node, push to call_stack + new_node_details = %{ + id: System.unique_integer([:positive, :monotonic]), + # module: unquote(module_name_runtime_var), + # function: unquote(printable_fn_name_runtime_var), + function: + "#{unquote(module_name_runtime_var)}.#{unquote(printable_fn_name_runtime_var)}", + # arity: unquote(arity_runtime_var), + args: sanitized_args_for_node, + depth: current_call_print_depth, + # timestamp_enter_monotonic: System.monotonic_time(), + # Will be populated in reverse chronological order + children: [], + # Placeholder + result: :__td_not_returned_yet__ + } + + Process.put(:tdd_debug_call_stack, [ + new_node_details | Process.get(:tdd_debug_call_stack, []) + ]) + + # Logging Call (existing logic) + indent = String.duplicate(" ", current_call_print_depth - 1) + + IO.puts( + "#{indent}CALL: #{unquote(module_name_runtime_var)}.#{unquote(printable_fn_name_runtime_var)}" + ) + + IO.puts("#{indent} ARGS: #{inspect(runtime_arg_values_for_node_and_log)}") + + try do + # --- Execute Original Body --- + result_val = unquote(original_body_ast) + + # --- On Normal Exit: Finalize Node & Log --- + [completed_node_placeholder | parent_stack_nodes] = Process.get(:tdd_debug_call_stack) + ts_exit_monotonic = System.monotonic_time() + + # duration_ms = System.convert_time_unit(ts_exit_monotonic - completed_node_placeholder.timestamp_enter_monotonic, :native, :millisecond) + sanitized_result_val = Tdd.Debug.TracerData.sanitize_value(result_val) + + finalized_node = + completed_node_placeholder + # |> Map.put(:timestamp_exit_monotonic, ts_exit_monotonic) + # |> Map.put(:duration_ms, duration_ms) + |> Map.put(:result, %{type: :ok, value: sanitized_result_val}) + + # For print indent + _ = 
Tdd.Debug.decrement_depth() + + # Update call stack: Add finalized_node to parent's children or session_roots + new_call_stack_after_success = + case parent_stack_nodes do + [parent_on_stack | grand_parent_stack_nodes] -> + updated_parent = %{ + parent_on_stack + | children: [finalized_node | parent_on_stack.children] + } + + [updated_parent | grand_parent_stack_nodes] + + # This was a root call + [] -> + Process.put(:tdd_debug_session_roots, [ + finalized_node | Process.get(:tdd_debug_session_roots, []) + ]) + + # New call_stack is empty for this branch + [] + end + + Process.put(:tdd_debug_call_stack, new_call_stack_after_success) + + # Logging Return + IO.puts( + "#{indent}RETURN from #{unquote(module_name_runtime_var)}.#{unquote(printable_fn_name_runtime_var)}: #{inspect(result_val)}" + ) + + # Return actual result + result_val + rescue + exception_class_err -> + # --- On Error Exit: Finalize Node & Log --- + error_instance_err = exception_class_err + stacktrace_err = __STACKTRACE__ + + [erroring_node_placeholder | parent_stack_nodes_err] = + Process.get(:tdd_debug_call_stack) + + ts_exit_monotonic_err = System.monotonic_time() + + duration_ms_err = + System.convert_time_unit( + ts_exit_monotonic_err - erroring_node_placeholder.timestamp_enter_monotonic, + :native, + :millisecond + ) + + sanitized_error_info = + Tdd.Debug.TracerData.sanitize_error(error_instance_err, stacktrace_err) + + finalized_error_node = + erroring_node_placeholder + |> Map.put(:timestamp_exit_monotonic, ts_exit_monotonic_err) + |> Map.put(:duration_ms, duration_ms_err) + |> Map.put(:result, sanitized_error_info) + + # For print indent + _ = Tdd.Debug.decrement_depth() + + # Update call stack: Add finalized_error_node to parent's children or session_roots + new_call_stack_after_error = + case parent_stack_nodes_err do + [parent_on_stack_err | grand_parent_stack_nodes_err] -> + updated_parent_err = %{ + parent_on_stack_err + | children: [finalized_error_node | parent_on_stack_err.children] 
+ } + + [updated_parent_err | grand_parent_stack_nodes_err] + + # This was a root call that errored + [] -> + Process.put(:tdd_debug_session_roots, [ + finalized_error_node | Process.get(:tdd_debug_session_roots, []) + ]) + + # New call_stack is empty for this branch + [] + end + + Process.put(:tdd_debug_call_stack, new_call_stack_after_error) + + # Logging Error + IO.puts( + "#{indent}ERROR in #{unquote(module_name_runtime_var)}.#{unquote(printable_fn_name_runtime_var)}: #{inspect(error_instance_err)}" + ) + + # Reraise the original error + reraise error_instance_err, stacktrace_err + end + else + # --- Not Traced: Execute Original Body Directly --- + unquote(original_body_ast) + end + end + + # --- Final Definition --- + final_definition_ast = + quote location: :keep do + Kernel.unquote(type)( + unquote(new_call_ast), + do: unquote(traced_body_inner_ast) + ) + end + + final_definition_ast + end + + # --- TDD Graph Printing (Kept as it was, assuming it's for a different purpose) --- + @doc "Prints a formatted representation of a TDD graph structure." 
+ def print_tdd_graph(id, store_module \\ Tdd.Store) do + IO.puts("--- TDD Graph (ID: #{id}) ---") + do_print_tdd_node(id, 0, MapSet.new(), store_module) + IO.puts("------------------------") + end + + defp do_print_tdd_node(id, indent_level, visited, store_module) do + prefix = String.duplicate(" ", indent_level) + + if MapSet.member?(visited, id) do + IO.puts("#{prefix}ID #{id} -> [Seen, recursive link]") + :ok + else + new_visited = MapSet.put(visited, id) + # Assumes store_module.get_node/1 exists + case store_module.get_node(id) do + {:ok, :true_terminal} -> + IO.puts("#{prefix}ID #{id} -> TRUE") + + {:ok, :false_terminal} -> + IO.puts("#{prefix}ID #{id} -> FALSE") + + {:ok, {var, y_id, n_id, dc_id}} -> + IO.puts("#{prefix}ID #{id}: IF #{inspect(var)}") + IO.puts("#{prefix} ├─ Yes (to ID #{y_id}):") + do_print_tdd_node(y_id, indent_level + 2, new_visited, store_module) + IO.puts("#{prefix} ├─ No (to ID #{n_id}):") + do_print_tdd_node(n_id, indent_level + 2, new_visited, store_module) + IO.puts("#{prefix} └─ DC (to ID #{dc_id}):") + do_print_tdd_node(dc_id, indent_level + 2, new_visited, store_module) + + {:error, reason} -> + IO.puts("#{prefix}ID #{id}: ERROR - #{reason}") + end + end + end + + @doc "Logs a value with a label, using IO.inspect." + def log(value, label, opts \\ []) do + # You can add a flag here to disable all logging globally + if true do + # Default options for better visibility + default_opts = [width: 120, pretty: true] + final_opts = Keyword.merge(default_opts, opts) + IO.inspect(value, [{:label, "[DEBUG] #{label}"} | final_opts]) + end + + # Return the original value to allow piping + value + end + + @doc "Gets and increments a depth counter for tracing recursion." + defp get_depth() do + depth = Process.get(:debug_depth, 0) + Process.put(:debug_depth, depth + 1) + String.duplicate(" ", depth) + end + + @doc "Decrements the depth counter." 
+ defp dec_depth() do + depth = Process.get(:debug_depth, 1) |> Kernel.-(1) |> max(0) + Process.put(:debug_depth, depth) + end + + @doc "Logs a message with indentation for tracing recursion." + def log_entry(label) do + prefix = get_depth() + IO.inspect(prefix, label: "PREFIX") + IO.puts("#{prefix}[DEBUG] >> #{label}") + end + + @doc "Logs a return value with indentation." + def log_exit(value, label) do + dec_depth() + prefix = String.duplicate(" ", Process.get(:debug_depth, 0)) + IO.inspect(value, label: prefix <> "[DEBUG] << #{label}") + # Return the value + value + end +end diff --git a/lib/til.ex b/lib/til.ex index ae1cc31..291d014 100644 --- a/lib/til.ex +++ b/lib/til.ex @@ -1,18 +1,2088 @@ -defmodule Til do +defmodule Tdd.TypeSpec do @moduledoc """ - Documentation for `Til`. + Defines the `TypeSpec` structure and functions for its manipulation. + Normalization includes alpha-conversion, beta-reduction, and a final + canonical renaming pass for bound variables. """ + # --- Core Types --- + @type t :: + :any + | :none + | :atom + | :integer + | :list + | :tuple + | {:literal, term()} + | {:union, [t()]} + | {:intersect, [t()]} + | {:negation, t()} + | {:tuple, [t()]} + | {:cons, head :: t(), tail :: t()} + | {:list_of, element :: t()} + | {:integer_range, min :: integer() | :neg_inf, max :: integer() | :pos_inf} + | {:type_var, name :: atom()} + | {:mu, type_variable_name :: atom(), body :: t()} + | {:type_lambda, param_names :: [atom()], body :: t()} + | {:type_apply, constructor_spec :: t(), arg_specs :: [t()]} + @doc """ - Hello world. - - ## Examples - - iex> Til.hello() - :world - + Converts a `TypeSpec` into its canonical (normalized) form. + Performs structural normalization, alpha-conversion, beta-reduction, + and a final canonical renaming pass for all bound variables. 
""" - def hello do - :world + @spec normalize(t()) :: t() + def normalize(spec) do + {intermediate_normalized, _counter_after_pass1} = normalize_pass1(spec, %{}, 0) + + {final_spec_before_subtype_redux, _mu_counter, _lambda_counter} = + canonical_rename_pass(intermediate_normalized, %{}, 0, 0) + + apply_subtype_reduction(final_spec_before_subtype_redux) + end + + # Final pass for subtype-based reductions on fully canonical specs + defp apply_subtype_reduction(spec) do + case spec do + {:union, members} -> + recursively_reduced_members = Enum.map(members, &apply_subtype_reduction/1) + + flattened_members = + Enum.flat_map(recursively_reduced_members, fn + {:union, sub_members} -> sub_members + m -> [m] + end) + + unique_no_none = + flattened_members + |> Enum.reject(&(&1 == :none)) + |> Enum.uniq() + + if Enum.member?(unique_no_none, :any) do + :any + else + # Pass `true` for already_normalized flag to is_subtype? + final_members = + Enum.reject(unique_no_none, fn member_to_check -> + Enum.any?(unique_no_none, fn other_member -> + member_to_check != other_member and + is_subtype?(member_to_check, other_member, true) + end) + end) + + case Enum.sort(final_members) do + [] -> :none + [single] -> single + list_members -> {:union, list_members} + end + end + + {:intersect, members} -> + recursively_reduced_members = Enum.map(members, &apply_subtype_reduction/1) + + expanded_flattened_members = + Enum.flat_map(recursively_reduced_members, fn + {:intersect, sub_members} -> sub_members + # get_supertypes expects normalized spec, and its output is also normalized + # Pass flag + m -> get_supertypes(m, true) + end) + + unique_no_any = + expanded_flattened_members + |> Enum.reject(&(&1 == :any)) + |> Enum.uniq() + + if Enum.member?(unique_no_any, :none) do + :none + else + # Pass `true` for already_normalized flag to is_subtype? 
+ final_members = + Enum.reject(unique_no_any, fn member_to_check -> + Enum.any?(unique_no_any, fn other_member -> + member_to_check != other_member and + is_subtype?(other_member, member_to_check, true) + end) + end) + + case Enum.sort(final_members) do + [] -> :any + [single] -> single + list_members -> {:intersect, list_members} + end + end + + {:negation, body} -> + {:negation, apply_subtype_reduction(body)} + + {:tuple, elements} -> + {:tuple, Enum.map(elements, &apply_subtype_reduction/1)} + + {:cons, head, tail} -> + {:cons, apply_subtype_reduction(head), apply_subtype_reduction(tail)} + + {:mu, var_name, body} -> + {:mu, var_name, apply_subtype_reduction(body)} + + {:type_lambda, params, body} -> + {:type_lambda, params, apply_subtype_reduction(body)} + + {:type_apply, constructor, args} -> + {:type_apply, apply_subtype_reduction(constructor), + Enum.map(args, &apply_subtype_reduction/1)} + + atomic_or_literal -> + atomic_or_literal + end + end + + # ------------------------------------------------------------------ + # Pass 1: Structural Normalization, Beta-Reduction, Initial Alpha-Conversion + # Returns: {normalized_spec, next_counter} + # ------------------------------------------------------------------ + defp normalize_pass1(spec, env, counter) do + res_tuple = + case spec do + s when is_atom(s) and s in [:any, :none, :atom, :integer, :list, :tuple] -> + {s, counter} + + {:literal, _val} = lit_spec -> + {lit_spec, counter} + + {:type_var, name} -> + {Map.get(env, name, spec), counter} + + {:negation, sub_spec} -> + normalize_negation_pass1(sub_spec, env, counter) + + {:tuple, elements} -> + {normalized_elements, next_counter_after_elements} = + map_fold_counter_for_pass1(elements, env, counter, &normalize_pass1/3) + + {{:tuple, normalized_elements}, next_counter_after_elements} + + {:cons, head, tail} -> + {normalized_head, counter_after_head} = normalize_pass1(head, env, counter) + {normalized_tail, counter_after_tail} = normalize_pass1(tail, env, 
counter_after_head) + {{:cons, normalized_head, normalized_tail}, counter_after_tail} + + {:integer_range, min, max} -> + range_spec = + if is_integer(min) and is_integer(max) and min > max do + :none + else + {:integer_range, min, max} + end + + {range_spec, counter} + + {:union, members} -> + normalize_union_pass1(members, env, counter) + + {:intersect, members} -> + normalize_intersection_pass1(members, env, counter) + + {:list_of, element_spec} -> + # We transform `list_of(E)` into a `mu` expression. + # This expression will then be normalized by a recursive call. + # First, normalize the element's spec. + {normalized_element, counter_after_element} = + normalize_pass1(element_spec, env, counter) + + # Create a *temporary, non-canonical* name for the recursive variable. + # The subsequent `normalize_pass1` call on the `mu` form will perform + # the proper, canonical renaming. + temp_rec_var = :"$list_of_rec_var" + + list_body = + {:union, + [ + {:literal, []}, + {:cons, normalized_element, {:type_var, temp_rec_var}} + ]} + + # Now, normalize the full mu-expression. This is the crucial step. + # It will handle alpha-conversion of `temp_rec_var` and normalization + # of the body's components. + normalize_pass1({:mu, temp_rec_var, list_body}, env, counter_after_element) + + {:mu, var_name, body} -> + # This logic is correct. It creates a fresh canonical name and + # adds it to the environment for normalizing the body. 
+ fresh_temp_name = fresh_var_name(:p1_m_var, counter) + body_env = Map.put(env, var_name, {:type_var, fresh_temp_name}) + + {normalized_body, next_counter_after_body} = + normalize_pass1(body, body_env, counter + 1) + + {{:mu, fresh_temp_name, normalized_body}, next_counter_after_body} + + {:type_lambda, param_names, body} -> + {reversed_fresh_temp_names, next_counter_after_params, body_env} = + Enum.reduce(param_names, {[], counter, env}, fn param_name, + {acc_fresh_names, cnt, current_env} -> + fresh_name = fresh_var_name(:p1_lambda_var, cnt) + + {[fresh_name | acc_fresh_names], cnt + 1, + Map.put(current_env, param_name, {:type_var, fresh_name})} + end) + + fresh_temp_param_names = Enum.reverse(reversed_fresh_temp_names) + + {normalized_body, final_counter} = + normalize_pass1(body, body_env, next_counter_after_params) + + {{:type_lambda, fresh_temp_param_names, normalized_body}, final_counter} + + {:type_apply, constructor_spec, arg_specs} -> + {normalized_constructor, counter_after_constructor} = + normalize_pass1(constructor_spec, env, counter) + + {normalized_arg_specs, counter_after_args} = + map_fold_counter_for_pass1( + arg_specs, + env, + counter_after_constructor, + &normalize_pass1/3 + ) + + case normalized_constructor do + {:type_lambda, pass1_formal_params, pass1_lambda_body} -> + if length(pass1_formal_params) != length(normalized_arg_specs) do + raise "TypeSpec.normalize_pass1: Arity mismatch in application. Expected #{length(pass1_formal_params)} args, got #{length(normalized_arg_specs)}. 
Lambda: #{inspect(normalized_constructor)}, Args: #{inspect(normalized_arg_specs)}" + else + substitution_map = Map.new(Enum.zip(pass1_formal_params, normalized_arg_specs)) + + substituted_body = + substitute_vars_pass1(pass1_lambda_body, substitution_map, MapSet.new()) + + normalize_pass1(substituted_body, env, counter_after_args) + end + + _other_constructor -> + {{:type_apply, normalized_constructor, normalized_arg_specs}, counter_after_args} + end + + other_spec -> + raise "TypeSpec.normalize_pass1: Unhandled spec form: #{inspect(other_spec)}" + end + + res_tuple + end + + defp map_fold_counter_for_pass1(list, env, initial_counter, fun) do + Enum.map_reduce(list, initial_counter, fn item, acc_counter -> + fun.(item, env, acc_counter) + end) + end + + defp substitute_vars_pass1(spec, substitutions, bound_in_scope) do + case spec do + {:type_var, name} -> + if MapSet.member?(bound_in_scope, name) do + spec + else + Map.get(substitutions, name, spec) + end + + {:mu, var_name, body} -> + newly_bound_scope = MapSet.put(bound_in_scope, var_name) + active_substitutions = Map.delete(substitutions, var_name) + {:mu, var_name, substitute_vars_pass1(body, active_substitutions, newly_bound_scope)} + + {:type_lambda, param_names, body} -> + newly_bound_scope = Enum.reduce(param_names, bound_in_scope, &MapSet.put(&2, &1)) + active_substitutions = Enum.reduce(param_names, substitutions, &Map.delete(&2, &1)) + + {:type_lambda, param_names, + substitute_vars_pass1(body, active_substitutions, newly_bound_scope)} + + {:negation, sub} -> + {:negation, substitute_vars_pass1(sub, substitutions, bound_in_scope)} + + {:tuple, elements} -> + {:tuple, Enum.map(elements, &substitute_vars_pass1(&1, substitutions, bound_in_scope))} + + {:cons, h, t} -> + {:cons, substitute_vars_pass1(h, substitutions, bound_in_scope), + substitute_vars_pass1(t, substitutions, bound_in_scope)} + + {:list_of, e} -> + {:list_of, substitute_vars_pass1(e, substitutions, bound_in_scope)} + + {:union, members} -> 
+ {:union, Enum.map(members, &substitute_vars_pass1(&1, substitutions, bound_in_scope))} + + {:intersect, members} -> + {:intersect, Enum.map(members, &substitute_vars_pass1(&1, substitutions, bound_in_scope))} + + {:type_apply, con, args} -> + new_con = substitute_vars_pass1(con, substitutions, bound_in_scope) + new_args = Enum.map(args, &substitute_vars_pass1(&1, substitutions, bound_in_scope)) + {:type_apply, new_con, new_args} + + _atomic_or_simple_spec -> + spec + end + end + + defp normalize_negation_pass1(sub_spec, env, counter) do + {normalized_sub, next_counter} = normalize_pass1(sub_spec, env, counter) + + res_spec = + case normalized_sub do + {:negation, inner_spec} -> inner_spec + :any -> :none + :none -> :any + _ -> {:negation, normalized_sub} + end + + {res_spec, next_counter} + end + + defp normalize_union_pass1(members, env, initial_counter) do + {list_of_normalized_member_lists, final_counter_after_all_members} = + Enum.map_reduce(members, initial_counter, fn member_spec, current_processing_counter -> + {normalized_member_spec_term, counter_after_this_member_normalized} = + normalize_pass1(member_spec, env, current_processing_counter) + + members_to_add_to_overall_list = + case normalized_member_spec_term do + {:union, sub_members} -> sub_members + _ -> [normalized_member_spec_term] + end + + {members_to_add_to_overall_list, counter_after_this_member_normalized} + end) + + normalized_and_flattened = List.flatten(list_of_normalized_member_lists) + + unique_members = + normalized_and_flattened + |> Enum.reject(&(&1 == :none)) + |> Enum.uniq() + + if Enum.member?(unique_members, :any) do + {:any, final_counter_after_all_members} + else + sorted_for_pass1 = Enum.sort(unique_members) + + resulting_spec = + case sorted_for_pass1 do + [] -> :none + [single_member] -> single_member + list_members -> {:union, list_members} + end + + {resulting_spec, final_counter_after_all_members} + end + end + + defp normalize_intersection_pass1(members, env, 
initial_counter) do + {list_of_member_groups, final_counter_after_all_members} = + Enum.map_reduce(members, initial_counter, fn member_spec, current_processing_counter -> + {normalized_member_spec_term, counter_after_this_member_normalized} = + normalize_pass1(member_spec, env, current_processing_counter) + + expanded_members = + case normalized_member_spec_term do + {:intersect, sub_members} -> sub_members + _ -> get_supertypes_pass1(normalized_member_spec_term) + end + + {expanded_members, counter_after_this_member_normalized} + end) + + normalized_and_flattened_with_supertypes = List.flatten(list_of_member_groups) + + unique_members = + normalized_and_flattened_with_supertypes + |> Enum.reject(&(&1 == :any)) + |> Enum.uniq() + + if Enum.member?(unique_members, :none) do + {:none, final_counter_after_all_members} + else + sorted_for_pass1 = Enum.sort(unique_members) + + resulting_spec = + case sorted_for_pass1 do + [] -> :any + [single_member] -> single_member + list_members -> {:intersect, list_members} + end + + {resulting_spec, final_counter_after_all_members} + end + end + + defp get_supertypes_pass1(spec) do + supertypes = + case spec do + {:literal, val} when is_atom(val) -> [:atom] + {:literal, val} when is_integer(val) -> [:integer] + {:literal, val} when is_list(val) -> [:list] + {:literal, val} when is_tuple(val) -> [:tuple] + {:mu, _v, _body} -> [] + {:tuple, _} -> [:tuple] + {:integer_range, _, _} -> [:integer] + _ -> [] + end + + MapSet.to_list(MapSet.new([spec | supertypes])) + end + + defp canonical_rename_pass(spec, env, mu_c, lambda_c) do + case spec do + {:mu, old_var_name, body} -> + new_canonical_name = fresh_var_name(:m_var, mu_c) + body_env = Map.put(env, old_var_name, {:type_var, new_canonical_name}) + + {renamed_body, next_mu_c, next_lambda_c} = + canonical_rename_pass(body, body_env, mu_c + 1, lambda_c) + + {{:mu, new_canonical_name, renamed_body}, next_mu_c, next_lambda_c} + + {:type_lambda, old_param_names, body} -> + 
{reversed_new_param_names, next_lambda_c_after_params, body_env} = + Enum.reduce(old_param_names, {[], lambda_c, env}, fn old_name, + {acc_new_names, current_lc, + current_env} -> + fresh_canonical_name = fresh_var_name(:lambda_var, current_lc) + + {[fresh_canonical_name | acc_new_names], current_lc + 1, + Map.put(current_env, old_name, {:type_var, fresh_canonical_name})} + end) + + new_canonical_param_names = Enum.reverse(reversed_new_param_names) + + {renamed_body, final_mu_c, final_lambda_c} = + canonical_rename_pass(body, body_env, mu_c, next_lambda_c_after_params) + + {{:type_lambda, new_canonical_param_names, renamed_body}, final_mu_c, final_lambda_c} + + {:type_var, name} -> + {Map.get(env, name, spec), mu_c, lambda_c} + + {:negation, sub_spec} -> + {renamed_sub, nmc, nlc} = canonical_rename_pass(sub_spec, env, mu_c, lambda_c) + {{:negation, renamed_sub}, nmc, nlc} + + {:tuple, elements} -> + {renamed_elements, next_mu_c, next_lambda_c} = + map_foldl_counters_for_rename(elements, env, mu_c, lambda_c, &canonical_rename_pass/4) + + {{:tuple, renamed_elements}, next_mu_c, next_lambda_c} + + {:cons, head, tail} -> + {renamed_head, mu_c_after_head, lambda_c_after_head} = + canonical_rename_pass(head, env, mu_c, lambda_c) + + {renamed_tail, mu_c_after_tail, lambda_c_after_tail} = + canonical_rename_pass(tail, env, mu_c_after_head, lambda_c_after_head) + + {{:cons, renamed_head, renamed_tail}, mu_c_after_tail, lambda_c_after_tail} + + {:union, members} -> + sorted_members = Enum.sort(members) + + {renamed_members, next_mu_c, next_lambda_c} = + map_foldl_counters_for_rename( + sorted_members, + env, + mu_c, + lambda_c, + &canonical_rename_pass/4 + ) + + {{:union, Enum.sort(renamed_members)}, next_mu_c, next_lambda_c} + + {:intersect, members} -> + sorted_members = Enum.sort(members) + + {renamed_members, next_mu_c, next_lambda_c} = + map_foldl_counters_for_rename( + sorted_members, + env, + mu_c, + lambda_c, + &canonical_rename_pass/4 + ) + + {{:intersect, 
Enum.sort(renamed_members)}, next_mu_c, next_lambda_c} + + {:type_apply, constructor_spec, arg_specs} -> + {renamed_constructor, mu_c_after_con, lambda_c_after_con} = + canonical_rename_pass(constructor_spec, env, mu_c, lambda_c) + + {renamed_args, mu_c_after_args, lambda_c_after_args} = + map_foldl_counters_for_rename( + arg_specs, + env, + mu_c_after_con, + lambda_c_after_con, + &canonical_rename_pass/4 + ) + + {{:type_apply, renamed_constructor, renamed_args}, mu_c_after_args, lambda_c_after_args} + + s when is_atom(s) -> + {s, mu_c, lambda_c} + + {:literal, _} = spec -> + {spec, mu_c, lambda_c} + + {:integer_range, _, _} = spec -> + {spec, mu_c, lambda_c} + + {:list_of, _} = spec -> + raise "TypeSpec.canonical_rename_pass: Unexpected :list_of, should be :mu. Spec: #{inspect(spec)}" + + _other -> + raise "TypeSpec.canonical_rename_pass: Unhandled spec form: #{inspect(spec)}" + end + end + + defp map_foldl_counters_for_rename(list, env, initial_mu_c, initial_lambda_c, fun) do + {reversed_results, final_mu_c, final_lambda_c} = + Enum.reduce(list, {[], initial_mu_c, initial_lambda_c}, fn item, {acc_items, mc, lc} -> + {processed_item, next_mc, next_lc} = fun.(item, env, mc, lc) + {[processed_item | acc_items], next_mc, next_lc} + end) + + {Enum.reverse(reversed_results), final_mu_c, final_lambda_c} + end + + defp fresh_var_name(prefix_atom, counter) do + :"#{Atom.to_string(prefix_atom)}#{counter}" + end + + # Public API + @spec is_subtype?(t(), t()) :: boolean + def is_subtype?(spec1, spec2), do: is_subtype?(spec1, spec2, false) + + # Internal helper with already_normalized flag + @spec is_subtype?(t(), t(), boolean) :: boolean + def is_subtype?(spec1, spec2, already_normalized) do + cond do + spec1 == spec2 -> + true + + spec1 == :none -> + true + + spec2 == :any -> + true + + spec1 == :any and spec2 != :any -> + false + + spec2 == :none and spec1 != :none -> + false + + true -> + {norm_s1, norm_s2} = + if already_normalized do + {spec1, spec2} + else + 
{normalize(spec1), normalize(spec2)} + end + + if norm_s1 == norm_s2 do + true + else + do_is_subtype_structural?(norm_s1, norm_s2, MapSet.new()) + end + end + end + + defp do_is_subtype_structural?(spec1, spec2, visited) do + if MapSet.member?(visited, {spec1, spec2}) do + true + else + cond do + spec1 == :none -> + true + + spec2 == :any -> + true + + spec1 == :any and spec2 != :any -> + false + + spec2 == :none and spec1 != :none -> + false + + spec1 == spec2 -> + true + + true -> + new_visited = MapSet.put(visited, {spec1, spec2}) + + case {spec1, spec2} do + {{:union, members1}, _} -> + Enum.all?(members1, &do_is_subtype_structural?(&1, spec2, new_visited)) + + {_, {:union, members2}} -> + Enum.any?(members2, &do_is_subtype_structural?(spec1, &1, new_visited)) + + {{:intersect, members1}, _} -> + Enum.any?(members1, &do_is_subtype_structural?(&1, spec2, new_visited)) + + {_, {:intersect, members2}} -> + Enum.all?(members2, &do_is_subtype_structural?(spec1, &1, new_visited)) + + {s1, s2} + when is_atom(s1) and is_atom(s2) and s1 not in [:any, :none] and + s2 not in [:any, :none] -> + s1 == s2 + + {{:literal, v1}, {:literal, v2}} -> + v1 == v2 + + {{:literal, val}, :atom} when is_atom(val) -> + true + + {{:literal, val}, :integer} when is_integer(val) -> + true + + {{:literal, val}, :list} when is_list(val) -> + true + + {{:literal, val}, :tuple} when is_tuple(val) -> + true + + {{:tuple, elems1}, {:tuple, elems2}} when length(elems1) == length(elems2) -> + Enum.zip_with(elems1, elems2, &do_is_subtype_structural?(&1, &2, new_visited)) + |> Enum.all?() + + {{:tuple, _}, :tuple} -> + true + + {{:integer_range, _, _}, :integer} -> + true + + {{:integer_range, min1, max1}, {:integer_range, min2, max2}} -> + min1_gte_min2 = + case {min1, min2} do + {:neg_inf, _} -> min2 == :neg_inf + {_, :neg_inf} -> true + {m1_v, m2_v} when is_integer(m1_v) and is_integer(m2_v) -> m1_v >= m2_v + _ -> false + end + + max1_lte_max2 = + case {max1, max2} do + {:pos_inf, _} -> max2 == 
:pos_inf + {_, :pos_inf} -> true + {m1_v, m2_v} when is_integer(m1_v) and is_integer(m2_v) -> m1_v <= m2_v + _ -> false + end + + min1_gte_min2 and max1_lte_max2 + + {{:literal, val}, {:integer_range, min, max}} when is_integer(val) -> + (min == :neg_inf or val >= min) and (max == :pos_inf or val <= max) + + {{:mu, v1, b1_body}, {:mu, v2, b2_body}} -> + # This logic is from the original file, which is correct in principle + # but was failing due to the TDD layer bug. + cond do + is_list_mu_form(b1_body, v1) and is_list_mu_form(b2_body, v2) -> + e1 = extract_list_mu_element(b1_body, v1) + e2 = extract_list_mu_element(b2_body, v2) + do_is_subtype_structural?(e1, e2, new_visited) + + true -> + unfolded_b1 = substitute_vars_canonical(b1_body, %{v1 => spec1}) + do_is_subtype_structural?(unfolded_b1, spec2, new_visited) + end + + {_non_mu_spec, {:mu, v2, b2_body} = mu_spec2} -> + unfolded_b2 = substitute_vars_canonical(b2_body, %{v2 => mu_spec2}) + do_is_subtype_structural?(spec1, unfolded_b2, new_visited) + + {{:mu, v1, b1_body} = mu_spec1, _non_mu_spec} -> + unfolded_b1 = substitute_vars_canonical(b1_body, %{v1 => mu_spec1}) + do_is_subtype_structural?(unfolded_b1, spec2, new_visited) + + {{:negation, n_body1}, {:negation, n_body2}} -> + do_is_subtype_structural?(n_body2, n_body1, new_visited) + + _ -> + false + end + end + end + end + + defp substitute_vars_canonical(spec, substitutions) do + case spec do + {:type_var, name} -> + Map.get(substitutions, name, spec) + + {:mu, var_name, body} -> + active_substitutions = Map.delete(substitutions, var_name) + {:mu, var_name, substitute_vars_canonical(body, active_substitutions)} + + {:type_lambda, param_names, body} -> + active_substitutions = Enum.reduce(param_names, substitutions, &Map.delete(&2, &1)) + {:type_lambda, param_names, substitute_vars_canonical(body, active_substitutions)} + + {:negation, sub} -> + {:negation, substitute_vars_canonical(sub, substitutions)} + + {:tuple, elements} -> + {:tuple, 
Enum.map(elements, &substitute_vars_canonical(&1, substitutions))} + + {:cons, h, t} -> + {:cons, substitute_vars_canonical(h, substitutions), + substitute_vars_canonical(t, substitutions)} + + {:list_of, e} -> + {:list_of, substitute_vars_canonical(e, substitutions)} + + {:union, members} -> + {:union, Enum.map(members, &substitute_vars_canonical(&1, substitutions))} + + {:intersect, members} -> + {:intersect, Enum.map(members, &substitute_vars_canonical(&1, substitutions))} + + {:type_apply, con, args} -> + new_con = substitute_vars_canonical(con, substitutions) + new_args = Enum.map(args, &substitute_vars_canonical(&1, substitutions)) + {:type_apply, new_con, new_args} + + _atomic_or_simple_spec -> + spec + end + end + + defp is_list_mu_form({:union, members}, rec_var_name) do + sorted_members = Enum.sort(members) + + match?([{:literal, []}, {:cons, _elem, {:type_var, ^rec_var_name}}], sorted_members) or + match?([{:cons, _elem, {:type_var, ^rec_var_name}}, {:literal, []}], sorted_members) + end + + defp is_list_mu_form(_, _), do: false + + defp extract_list_mu_element({:union, members}, rec_var_name) do + Enum.find_value(members, fn + {:cons, elem_spec, {:type_var, ^rec_var_name}} -> elem_spec + _ -> nil + end) || :any + end + + # Public API for get_supertypes + def get_supertypes(spec), do: get_supertypes(spec, false) + + # Internal helper for get_supertypes + defp get_supertypes(spec_input, already_normalized) do + fully_normalized_spec = if already_normalized, do: spec_input, else: normalize(spec_input) + + supertypes = + case fully_normalized_spec do + {:literal, val} when is_atom(val) -> [:atom] + {:literal, val} when is_integer(val) -> [:integer] + {:literal, val} when is_list(val) -> [:list] + {:literal, val} when is_tuple(val) -> [:tuple] + {:mu, v, body} -> if is_list_mu_form(body, v), do: [:list], else: [] + {:tuple, _} -> [:tuple] + {:integer_range, _, _} -> [:integer] + _ -> [] + end + + MapSet.to_list(MapSet.new([fully_normalized_spec | 
supertypes])) + end +end + +defmodule Tdd.Store do + # NOTE: This module remains unchanged. + # The original provided code for this module is correct and complete. + @moduledoc """ + Manages the state of the TDD system's node graph and operation cache. + """ + + # --- State Keys --- + @nodes_key :tdd_nodes + @node_by_id_key :tdd_node_by_id + @next_id_key :tdd_next_id + @op_cache_key :tdd_op_cache + + # --- Terminal Node IDs --- + @false_node_id 0 + @true_node_id 1 + + # --- Public API --- + + @doc "Initializes the TDD store in the current process." + def init do + Process.put(@nodes_key, %{}) + + Process.put(@node_by_id_key, %{ + @false_node_id => :false_terminal, + @true_node_id => :true_terminal + }) + + Process.put(@next_id_key, 2) + Process.put(@op_cache_key, %{}) + :ok + end + + @doc "Returns the ID for the TRUE terminal node (the 'any' type)." + @spec true_node_id() :: non_neg_integer() + def true_node_id, do: @true_node_id + + @doc "Returns the ID for the FALSE terminal node (the 'none' type)." + @spec false_node_id() :: non_neg_integer() + def false_node_id, do: @false_node_id + + @doc "Retrieves the details of a node by its ID." + @spec get_node(non_neg_integer()) :: + {:ok, + {variable :: term(), yes_id :: non_neg_integer(), no_id :: non_neg_integer(), + dc_id :: non_neg_integer()}} + | {:ok, :true_terminal | :false_terminal} + | {:error, :not_found} + def get_node(id) do + case Process.get(@node_by_id_key, %{}) do + %{^id => details} -> {:ok, details} + %{} -> {:error, :not_found} + end + end + + @doc """ + Finds an existing node that matches the structure or creates a new one. 
+ """ + @spec find_or_create_node( + variable :: term(), + yes_id :: non_neg_integer(), + no_id :: non_neg_integer(), + dc_id :: non_neg_integer() + ) :: non_neg_integer() + def find_or_create_node(variable, yes_id, no_id, dc_id) do + if yes_id == no_id && yes_id == dc_id do + yes_id + else + node_tuple = {variable, yes_id, no_id, dc_id} + nodes = Process.get(@nodes_key, %{}) + + case Map.get(nodes, node_tuple) do + id when is_integer(id) -> + id + + nil -> + next_id = Process.get(@next_id_key) + node_by_id = Process.get(@node_by_id_key) + + Process.put(@nodes_key, Map.put(nodes, node_tuple, next_id)) + Process.put(@node_by_id_key, Map.put(node_by_id, next_id, node_tuple)) + Process.put(@next_id_key, next_id + 1) + + next_id + end + end + end + + @doc "Retrieves a result from the operation cache." + @spec get_op_cache(term()) :: {:ok, term()} | :not_found + def get_op_cache(cache_key) do + case Process.get(@op_cache_key, %{}) do + %{^cache_key => result} -> {:ok, result} + %{} -> :not_found + end + end + + @doc "Puts a result into the operation cache." + @spec put_op_cache(term(), term()) :: :ok + def put_op_cache(cache_key, result) do + cache = Process.get(@op_cache_key, %{}) + Process.put(@op_cache_key, Map.put(cache, cache_key, result)) + :ok + end + + @doc """ + Creates a unique, temporary placeholder node for a recursive spec. + Returns the ID of this placeholder. + """ + @spec create_placeholder(TypeSpec.t()) :: non_neg_integer() + def create_placeholder(spec) do + find_or_create_node({:placeholder, spec}, 1, 0, 0) + end + + @doc """ + Updates a node's details directly. Used for knot-tying. 
+ """ + @spec update_node_in_place( + non_neg_integer(), + new_details :: + {:ok, + {term(), non_neg_integer(), non_neg_integer(), non_neg_integer()} + | :true_terminal + | :false_terminal} + ) :: :ok + def update_node_in_place(id, {:ok, new_details}) do + nodes = Process.get(@nodes_key) + node_by_id = Process.get(@node_by_id_key) + + old_details = Map.get(node_by_id, id) + nodes = Map.delete(nodes, old_details) + + nodes = + case new_details do + {_v, _y, _n, _d} -> Map.put(nodes, new_details, id) + _ -> nodes + end + + node_by_id = Map.put(node_by_id, id, new_details) + + Process.put(@nodes_key, nodes) + Process.put(@node_by_id_key, node_by_id) + :ok + end +end + +defmodule Tdd.Variable do + @moduledoc """ + Defines the canonical structure for all Tdd predicate variables. + REFAC: This module is unchanged, but its functions for recursive types will + now be called with TDD IDs instead of TypeSpecs by the Tdd.Compiler. + """ + + # --- Category 0: Primary Type Discriminators --- + @spec v_is_atom() :: term() + def v_is_atom, do: {0, :is_atom, nil, nil} + + @spec v_is_integer() :: term() + def v_is_integer, do: {0, :is_integer, nil, nil} + + @spec v_is_list() :: term() + def v_is_list, do: {0, :is_list, nil, nil} + + @spec v_is_tuple() :: term() + def v_is_tuple, do: {0, :is_tuple, nil, nil} + + # --- Category 1: Atom Properties --- + @spec v_atom_eq(atom()) :: term() + def v_atom_eq(atom_val) when is_atom(atom_val), do: {1, :value, atom_val, nil} + + # --- Category 2: Integer Properties --- + @spec v_int_lt(integer()) :: term() + def v_int_lt(n) when is_integer(n), do: {2, :alt, n, nil} + + @spec v_int_eq(integer()) :: term() + def v_int_eq(n) when is_integer(n), do: {2, :beq, n, nil} + + @spec v_int_gt(integer()) :: term() + def v_int_gt(n) when is_integer(n), do: {2, :cgt, n, nil} + + # --- Category 4: Tuple Properties --- + @spec v_tuple_size_eq(non_neg_integer()) :: term() + def v_tuple_size_eq(size) when is_integer(size) and size >= 0, do: {4, :a_size, size, 
nil} + + @doc "Applies a predicate to a tuple element. The predicate is now represented by its TDD ID." + @spec v_tuple_elem_pred(non_neg_integer(), sub_problem_tdd_id :: non_neg_integer()) :: term() + def v_tuple_elem_pred(index, sub_problem_tdd_id) + when is_integer(index) and index >= 0 and is_integer(sub_problem_tdd_id) do + # REFAC: The nested term is now a TDD ID, not a spec or a variable. + {4, :b_element, index, sub_problem_tdd_id} + end + + # --- Category 5: List Properties --- + @doc "Predicate: The list is the empty list `[]`." + @spec v_list_is_empty() :: term() + def v_list_is_empty, do: {5, :b_is_empty, nil, nil} + + @doc "Applies a predicate to the head. The predicate is now represented by its TDD ID." + @spec v_list_head_pred(sub_problem_tdd_id :: non_neg_integer()) :: term() + def v_list_head_pred(sub_problem_tdd_id) when is_integer(sub_problem_tdd_id), + do: {5, :c_head, sub_problem_tdd_id, nil} + + @doc "Applies a predicate to the tail. The predicate is now represented by its TDD ID." + @spec v_list_tail_pred(sub_problem_tdd_id :: non_neg_integer()) :: term() + def v_list_tail_pred(sub_problem_tdd_id) when is_integer(sub_problem_tdd_id), + do: {5, :d_tail, sub_problem_tdd_id, nil} +end + +defmodule Tdd.Predicate.Info do + # NOTE: This module remains largely unchanged. The traits for recursive variables + # correctly identify them by structure, independent of what's inside. + @moduledoc "A knowledge base for the properties of TDD predicate variables." + alias Tdd.Variable + + @doc "Returns a map of traits for a given predicate variable." 
+  @spec get_traits(term()) :: map() | nil
+
+  # Primary discriminator: the value is an atom. Asserting any one primary
+  # type implies the other three primary discriminators are false.
+  def get_traits({0, :is_atom, _, _}) do
+    %{
+      type: :primary,
+      category: :atom,
+      implies: [
+        {Variable.v_is_integer(), false},
+        {Variable.v_is_list(), false},
+        {Variable.v_is_tuple(), false}
+      ]
+    }
+  end
+
+  # Primary discriminator: the value is an integer.
+  def get_traits({0, :is_integer, _, _}) do
+    %{
+      type: :primary,
+      category: :integer,
+      implies: [
+        {Variable.v_is_atom(), false},
+        {Variable.v_is_list(), false},
+        {Variable.v_is_tuple(), false}
+      ]
+    }
+  end
+
+  # Primary discriminator: the value is a list.
+  def get_traits({0, :is_list, _, _}) do
+    %{
+      type: :primary,
+      category: :list,
+      implies: [
+        {Variable.v_is_atom(), false},
+        {Variable.v_is_integer(), false},
+        {Variable.v_is_tuple(), false}
+      ]
+    }
+  end
+
+  # Primary discriminator: the value is a tuple.
+  def get_traits({0, :is_tuple, _, _}) do
+    %{
+      type: :primary,
+      category: :tuple,
+      implies: [
+        {Variable.v_is_atom(), false},
+        {Variable.v_is_integer(), false},
+        {Variable.v_is_list(), false}
+      ]
+    }
+  end
+
+  # Atom-equality predicate: being equal to a specific atom implies is_atom.
+  def get_traits({1, :value, _val, _}) do
+    %{type: :atom_value, category: :atom, implies: [{Variable.v_is_atom(), true}]}
+  end
+
+  # Integer comparison predicates (:alt = less-than, :beq = equal,
+  # :cgt = greater-than); each implies is_integer.
+  def get_traits({2, :alt, _, _}),
+    do: %{type: :integer_prop, category: :integer, implies: [{Variable.v_is_integer(), true}]}
+
+  def get_traits({2, :beq, _, _}),
+    do: %{type: :integer_prop, category: :integer, implies: [{Variable.v_is_integer(), true}]}
+
+  def get_traits({2, :cgt, _, _}),
+    do: %{type: :integer_prop, category: :integer, implies: [{Variable.v_is_integer(), true}]}
+
+  # Tuple-size predicate: a fixed size implies is_tuple.
+  def get_traits({4, :a_size, _, _}) do
+    %{type: :tuple_prop, category: :tuple, implies: [{Variable.v_is_tuple(), true}]}
+  end
+
+  # REFAC: The trait recognizes the structure. The content `_tdd_id` is opaque here.
+ def get_traits({4, :b_element, index, _tdd_id}) do + %{ + type: :tuple_recursive, + category: :tuple, + sub_key: {:elem, index}, + implies: [{Variable.v_is_tuple(), true}] + } + end + + def get_traits({5, :b_is_empty, _, _}) do + %{type: :list_prop, category: :list, implies: [{Variable.v_is_list(), true}]} + end + + # REFAC: The trait recognizes the structure. The content `_tdd_id` is opaque here. + def get_traits({5, :c_head, _tdd_id, _}) do + %{ + type: :list_recursive, + category: :list, + sub_key: :head, + implies: [{Variable.v_is_list(), true}, {Variable.v_list_is_empty(), false}] + } + end + + # REFAC: The trait recognizes the structure. The content `_tdd_id` is opaque here. + def get_traits({5, :d_tail, _tdd_id, _}) do + %{ + type: :list_recursive, + category: :list, + sub_key: :tail, + implies: [{Variable.v_is_list(), true}, {Variable.v_list_is_empty(), false}] + } + end + + def get_traits(_), do: nil +end + +defmodule Tdd.Consistency.Engine do + @moduledoc """ + A rule-based engine for checking the semantic consistency of a set of assumptions. + REFAC: This module is largely unchanged, but we now make `remap_sub_problem_vars` + and `unwrap_var` public so they can be shared with Tdd.Algo. + """ + alias Tdd.Predicate.Info + alias Tdd.Variable + + @doc "Checks if a map of assumptions is logically consistent." + @spec check(map()) :: :consistent | :contradiction + def check(assumptions), do: do_check(assumptions) + + @doc "Expands a map of assumptions with all their logical implications." 
+ @spec expand(map()) :: {:ok, map()} | {:error, :contradiction} + def expand(assumptions), do: expand_with_implications(assumptions) + + # --- The Core Recursive Checker --- + defp do_check(assumptions) do + with {:ok, expanded} <- expand_with_implications(assumptions), + :ok <- check_flat_consistency(expanded) do + sub_problems = + expanded + |> Enum.group_by(fn {var, _val} -> (Info.get_traits(var) || %{})[:sub_key] end) + |> Map.drop([nil]) + + if map_size(sub_problems) == 0 do + :consistent + else + Enum.find_value(sub_problems, :consistent, fn {_sub_key, sub_assumptions_list} -> + remapped_assumptions = remap_sub_problem_vars(sub_assumptions_list) + + case do_check(remapped_assumptions) do + :consistent -> nil + :contradiction -> :contradiction + end + end) + end + else + {:error, _reason} -> :contradiction + end + end + + # --- Recursive Checking Helpers --- + + @doc "Converts a list of scoped assumptions into a map of base assumptions for a sub-problem." + @spec remap_sub_problem_vars([{term(), boolean()}]) :: map() + def remap_sub_problem_vars(assumptions_list) do + Map.new(assumptions_list, fn {var, val} -> + {unwrap_var(var), val} + end) + end + + @doc "Extracts the inner content from a recursive variable." + @spec unwrap_var(term()) :: term() + def unwrap_var(var) do + case var do + # REFAC: These variables now contain TDD IDs, but this function just extracts + # whatever is inside. The consumer (`handle_recursive_subproblem`) will know it's an ID. 
+ {4, :b_element, _index, inner_content} -> inner_content + {5, :c_head, inner_content, _} -> inner_content + {5, :d_tail, inner_content, _} -> inner_content + other -> other + end + end + + # --- Implication Expansion (Unchanged) --- + defp expand_with_implications(assumptions) do + expand_loop(assumptions, assumptions) + end + + defp expand_loop(new_assumptions, all_assumptions) do + implications = + Enum.flat_map(new_assumptions, fn + {var, true} -> Map.get(Info.get_traits(var) || %{}, :implies, []) + _ -> [] + end) + + case Enum.reduce(implications, {:ok, %{}}, fn {implied_var, implied_val}, acc -> + reduce_implication({implied_var, implied_val}, all_assumptions, acc) + end) do + {:error, :contradiction} = err -> + err + + {:ok, newly_added} when map_size(newly_added) == 0 -> + {:ok, all_assumptions} + + {:ok, newly_added} -> + expand_loop(newly_added, Map.merge(all_assumptions, newly_added)) + end + end + + defp reduce_implication({var, val}, all_assumptions, {:ok, new_acc}) do + case Map.get(all_assumptions, var) do + nil -> {:ok, Map.put(new_acc, var, val)} + ^val -> {:ok, new_acc} + _other_val -> {:error, :contradiction} + end + end + + defp reduce_implication(_implication, _all_assumptions, error_acc), do: error_acc + + # --- Flat Consistency Checks (Unchanged) --- + defp check_flat_consistency(assumptions) do + with :ok <- check_primary_type_exclusivity(assumptions), + :ok <- check_atom_consistency(assumptions), + :ok <- check_list_consistency(assumptions), + :ok <- check_integer_consistency(assumptions), + :ok <- check_tuple_consistency(assumptions) do + :ok + else + :error -> {:error, :consistency_error} + end + end + + defp check_primary_type_exclusivity(assumptions) do + primary_types = [ + Variable.v_is_atom(), + Variable.v_is_integer(), + Variable.v_is_list(), + Variable.v_is_tuple() + ] + + true_primary_types = Enum.count(primary_types, &(Map.get(assumptions, &1) == true)) + + if true_primary_types > 1, do: :error, else: :ok + end + + defp 
check_atom_consistency(assumptions) do
+    # At most one atom-equality predicate may be asserted true: a value cannot
+    # equal two different atoms at once.
+    true_atom_values =
+      Enum.reduce(assumptions, MapSet.new(), fn
+        {{1, :value, atom_val, _}, true}, acc -> MapSet.put(acc, atom_val)
+        _, acc -> acc
+      end)
+
+    if MapSet.size(true_atom_values) > 1, do: :error, else: :ok
+  end
+
+  # A tuple has exactly one size, so two distinct size_eq predicates that are
+  # both true form a contradiction.
+  defp check_tuple_consistency(assumptions) do
+    true_tuple_sizes =
+      Enum.reduce(assumptions, MapSet.new(), fn
+        {{4, :a_size, size, _}, true}, acc -> MapSet.put(acc, size)
+        _, acc -> acc
+      end)
+
+    if MapSet.size(true_tuple_sizes) > 1, do: :error, else: :ok
+  end
+
+  # The empty list has neither head nor tail, so `is_empty` is incompatible
+  # with any asserted head/tail predicate.
+  defp check_list_consistency(assumptions) do
+    is_empty = Map.get(assumptions, Variable.v_list_is_empty()) == true
+    has_head_prop = Enum.any?(assumptions, &match?({{5, :c_head, _, _}, true}, &1))
+    has_tail_prop = Enum.any?(assumptions, &match?({{5, :d_tail, _, _}, true}, &1))
+
+    if is_empty and (has_head_prop or has_tail_prop), do: :error, else: :ok
+  end
+
+  # Narrows an (initially unbounded) integer interval with every integer
+  # predicate in `assumptions`; an empty interval means a contradiction.
+  defp check_integer_consistency(assumptions) do
+    initial_range = {:neg_inf, :pos_inf}
+
+    result =
+      Enum.reduce_while(assumptions, initial_range, fn assumption, {min, max} ->
+        case assumption do
+          # :alt = "x < n", :beq = "x == n", :cgt = "x > n".
+          {{2, :alt, n, _}, true} -> narrow_range(min, safe_min(max, n - 1))
+          {{2, :alt, n, _}, false} -> narrow_range(safe_max(min, n), max)
+          {{2, :beq, n, _}, true} -> narrow_range(safe_max(min, n), safe_min(max, n))
+          # NOTE: "x != n" is only treated as a contradiction when the range is
+          # already pinned to exactly [n, n]; otherwise it is conservatively
+          # ignored (intervals cannot represent a hole).
+          {{2, :beq, n, _}, false} when min == n and max == n -> {:halt, :invalid}
+          {{2, :cgt, n, _}, true} -> narrow_range(safe_max(min, n + 1), max)
+          {{2, :cgt, n, _}, false} -> narrow_range(min, safe_min(max, n))
+          _ -> {:cont, {min, max}}
+        end
+      end)
+
+    # Idiom fix: the original used the non-formatter `case result, do: (...)`
+    # keyword form; a plain `if` expresses the same two-way branch.
+    if result == :invalid, do: :error, else: :ok
+  end
+
+  # Returns {:halt, :invalid} when the interval [min, max] is empty, otherwise
+  # {:cont, {min, max}} so the reduce_while above keeps folding.
+  defp narrow_range(min, max) do
+    is_invalid =
+      case {min, max} do
+        {:neg_inf, _} -> false
+        {_, :pos_inf} -> false
+        {m, n} when is_integer(m) and is_integer(n) -> m > n
+        _ -> false
+      end
+
+    if is_invalid, do: {:halt, :invalid}, else: {:cont, {min, max}}
+  end
+
+  # max/2 extended to the :neg_inf / :pos_inf sentinels.
+  defp safe_max(:neg_inf, x), do: x
+  defp safe_max(x, :neg_inf), do: x
+  defp safe_max(:pos_inf,
_), do: :pos_inf
+  defp safe_max(_, :pos_inf), do: :pos_inf
+  defp safe_max(a, b), do: :erlang.max(a, b)
+
+  # min/2 extended to the :pos_inf / :neg_inf sentinels.
+  defp safe_min(:pos_inf, x), do: x
+  defp safe_min(x, :pos_inf), do: x
+  defp safe_min(:neg_inf, _), do: :neg_inf
+  defp safe_min(_, :neg_inf), do: :neg_inf
+  defp safe_min(a, b), do: :erlang.min(a, b)
+end
+
+defmodule Tdd.Algo do
+  @moduledoc """
+  Implements the core, stateless algorithms for TDD manipulation.
+  """
+  use Tdd.Debug
+  alias Tdd.Store
+  alias Tdd.Consistency.Engine
+  alias Tdd.Debug
+
+  # --- Binary Operation: Apply ---
+  # This function is correct and does not need to be changed.
+  #
+  # Combines two TDD nodes with the boolean operation `op_lambda`, memoizing
+  # results in the process-local op cache under `op_name`.
+  #
+  # NOTE(review): the cache key sorts the two operand ids, so apply(op, a, b)
+  # and apply(op, b, a) share a cache entry. That is only sound if every
+  # op_lambda passed in is commutative (and/or/xor) — confirm no caller uses a
+  # non-commutative operation such as set difference.
+  @spec apply(atom, (atom, atom -> atom), non_neg_integer, non_neg_integer) :: non_neg_integer
+  def apply(op_name, op_lambda, u1_id, u2_id) do
+    cache_key = {:apply, op_name, Enum.sort([u1_id, u2_id])}
+
+    case Store.get_op_cache(cache_key) do
+      {:ok, result_id} ->
+        result_id
+
+      :not_found ->
+        result_id = do_apply(op_name, op_lambda, u1_id, u2_id)
+        Store.put_op_cache(cache_key, result_id)
+        result_id
+    end
+  end
+
+  # This function is correct and does not need to be changed.
+ defp do_apply(op_name, op_lambda, u1_id, u2_id) do + with {:ok, u1_details} <- Store.get_node(u1_id), + {:ok, u2_details} <- Store.get_node(u2_id) do + cond do + (u1_details == :true_terminal or u1_details == :false_terminal) and + (u2_details == :true_terminal or u2_details == :false_terminal) -> + if op_lambda.(u1_details, u2_details) == :true_terminal, + do: Store.true_node_id(), + else: Store.false_node_id() + + u1_details == :true_terminal or u1_details == :false_terminal -> + {var2, y2, n2, d2} = u2_details + + Store.find_or_create_node( + var2, + apply(op_name, op_lambda, u1_id, y2), + apply(op_name, op_lambda, u1_id, n2), + apply(op_name, op_lambda, u1_id, d2) + ) + + u2_details == :true_terminal or u2_details == :false_terminal -> + {var1, y1, n1, d1} = u1_details + + Store.find_or_create_node( + var1, + apply(op_name, op_lambda, y1, u2_id), + apply(op_name, op_lambda, n1, u2_id), + apply(op_name, op_lambda, d1, u2_id) + ) + + true -> + {var1, y1, n1, d1} = u1_details + {var2, y2, n2, d2} = u2_details + top_var = Enum.min([var1, var2]) + + res_y = + apply( + op_name, + op_lambda, + if(var1 == top_var, do: y1, else: u1_id), + if(var2 == top_var, do: y2, else: u2_id) + ) + + res_n = + apply( + op_name, + op_lambda, + if(var1 == top_var, do: n1, else: u1_id), + if(var2 == top_var, do: n2, else: u2_id) + ) + + res_d = + apply( + op_name, + op_lambda, + if(var1 == top_var, do: d1, else: u1_id), + if(var2 == top_var, do: d2, else: u2_id) + ) + + Store.find_or_create_node(top_var, res_y, res_n, res_d) + end + end + end + + # --- Unary Operation: Negation --- + # This function is correct and does not need to be changed. 
+ @spec negate(non_neg_integer) :: non_neg_integer + def negate(tdd_id) do + cache_key = {:negate, tdd_id} + + case Store.get_op_cache(cache_key) do + {:ok, result_id} -> + result_id + + :not_found -> + result_id = + case Store.get_node(tdd_id) do + {:ok, :true_terminal} -> + Store.false_node_id() + + {:ok, :false_terminal} -> + Store.true_node_id() + + {:ok, {var, y, n, d}} -> + Store.find_or_create_node(var, negate(y), negate(n), negate(d)) + end + + Store.put_op_cache(cache_key, result_id) + result_id + end + end + + # --- Unary Operation: Semantic Simplification --- + # This function is correct and does not need to be changed. + @spec simplify(non_neg_integer(), map()) :: non_neg_integer + def simplify(tdd_id, assumptions \\ %{}) do + sorted_assumptions = Enum.sort(assumptions) + cache_key = {:simplify, tdd_id, sorted_assumptions} + + case Store.get_op_cache(cache_key) do + {:ok, result_id} -> + result_id + + :not_found -> + result_id = do_simplify(tdd_id, sorted_assumptions, MapSet.new()) + Store.put_op_cache(cache_key, result_id) + result_id + end + end + + # This function is correct and does not need to be changed. + defp do_simplify(tdd_id, sorted_assumptions, context) do + current_state = {tdd_id, sorted_assumptions} + + if MapSet.member?(context, current_state) do + Store.true_node_id() + else + new_context = MapSet.put(context, current_state) + assumptions = Map.new(sorted_assumptions) + + if Engine.check(assumptions) == :contradiction do + Store.false_node_id() + else + case Store.get_node(tdd_id) do + {:ok, :true_terminal} -> + Store.true_node_id() + + {:ok, :false_terminal} -> + Store.false_node_id() + + {:ok, {var, y, n, d}} -> + # Dispatch to the handler for recursive variables. 
+ case var do + {5, :c_head, constraint_id, _} -> + handle_recursive_subproblem( + :simplify, + :head, + constraint_id, + {var, y, n, d}, + sorted_assumptions, + new_context + ) + + {5, :d_tail, constraint_id, _} -> + handle_recursive_subproblem( + :simplify, + :tail, + constraint_id, + {var, y, n, d}, + sorted_assumptions, + new_context + ) + + {4, :b_element, index, constraint_id} -> + handle_recursive_subproblem( + :simplify, + {:elem, index}, + constraint_id, + {var, y, n, d}, + sorted_assumptions, + new_context + ) + + _ -> + # The rest of the logic for standard variables is unchanged. + case Map.get(assumptions, var) do + true -> + do_simplify(y, sorted_assumptions, new_context) + + false -> + do_simplify(n, sorted_assumptions, new_context) + + :dc -> + do_simplify(d, sorted_assumptions, new_context) + + nil -> + assumptions_imply_true = + Engine.check(Map.put(assumptions, var, false)) == :contradiction + + assumptions_imply_false = + Engine.check(Map.put(assumptions, var, true)) == :contradiction + + cond do + assumptions_imply_true and assumptions_imply_false -> + Store.false_node_id() + + assumptions_imply_true -> + do_simplify(y, Enum.sort(Map.put(assumptions, var, true)), new_context) + + assumptions_imply_false -> + do_simplify(n, Enum.sort(Map.put(assumptions, var, false)), new_context) + + true -> + s_y = + do_simplify(y, Enum.sort(Map.put(assumptions, var, true)), new_context) + + s_n = + do_simplify(n, Enum.sort(Map.put(assumptions, var, false)), new_context) + + s_d = + do_simplify(d, Enum.sort(Map.put(assumptions, var, :dc)), new_context) + + Store.find_or_create_node(var, s_y, s_n, s_d) + end + end + end + end + end + end + end + + # --- Unary Operation: Substitute --- + # FIX: The implementation of substitute needs to change. 
+ + @spec substitute(non_neg_integer(), non_neg_integer(), non_neg_integer()) :: non_neg_integer() + def substitute(root_id, from_id, to_id) do + if root_id == from_id, do: to_id, else: do_substitute(root_id, from_id, to_id) + end + + # This helper inspects and replaces TDD IDs embedded in predicate variables. + defp substitute_in_var(var, from_id, to_id) do + case var do + {4, :b_element, index, ^from_id} -> {4, :b_element, index, to_id} + {5, :c_head, ^from_id, nil} -> {5, :c_head, to_id, nil} + {5, :d_tail, ^from_id, nil} -> {5, :d_tail, to_id, nil} + _other -> var + end + end + + defp do_substitute(root_id, from_id, to_id) do + cache_key = {:substitute, root_id, from_id, to_id} + + case Store.get_op_cache(cache_key) do + {:ok, result_id} -> + result_id + + :not_found -> + result_id = + case Store.get_node(root_id) do + {:ok, :true_terminal} -> + Store.true_node_id() + + {:ok, :false_terminal} -> + Store.false_node_id() + + {:ok, {var, y, n, d}} -> + # FIX: Substitute within the variable term itself. + new_var = substitute_in_var(var, from_id, to_id) + new_y = substitute(y, from_id, to_id) + new_n = substitute(n, from_id, to_id) + new_d = substitute(d, from_id, to_id) + Store.find_or_create_node(new_var, new_y, new_n, new_d) + + {:error, reason} -> + raise "substitute encountered an error getting node #{root_id}: #{reason}" + end + + Store.put_op_cache(cache_key, result_id) + result_id + end + end + + # --- Coinductive Emptiness Check --- + # This function is correct and does not need to be changed. + @spec check_emptiness(non_neg_integer()) :: non_neg_integer() + def check_emptiness(tdd_id) do + cache_key = {:check_emptiness, tdd_id} + + case Store.get_op_cache(cache_key) do + {:ok, id} -> + id + + :not_found -> + assumptions_list = [] + result_id = do_check_emptiness(tdd_id, assumptions_list, MapSet.new()) + Store.put_op_cache(cache_key, result_id) + result_id + end + end + + # This function is correct and does not need to be changed. 
+ defp do_check_emptiness(tdd_id, sorted_assumptions, context) do + current_state = {tdd_id, sorted_assumptions} + + if MapSet.member?(context, current_state) do + Store.false_node_id() + else + new_context = MapSet.put(context, current_state) + assumptions = Map.new(sorted_assumptions) + + if Engine.check(assumptions) == :contradiction do + Store.false_node_id() + else + case Store.get_node(tdd_id) do + {:ok, :true_terminal} -> + Store.true_node_id() + + {:ok, :false_terminal} -> + Store.false_node_id() + + {:ok, {var, y, n, d}} -> + # Dispatch to the handler for recursive variables. + case var do + {5, :c_head, constraint_id, _} -> + handle_recursive_subproblem( + :check_emptiness, + :head, + constraint_id, + {var, y, n, d}, + sorted_assumptions, + new_context + ) + + {5, :d_tail, constraint_id, _} -> + handle_recursive_subproblem( + :check_emptiness, + :tail, + constraint_id, + {var, y, n, d}, + sorted_assumptions, + new_context + ) + + {4, :b_element, index, constraint_id} -> + handle_recursive_subproblem( + :check_emptiness, + {:elem, index}, + constraint_id, + {var, y, n, d}, + sorted_assumptions, + new_context + ) + + _ -> + # The rest of the logic is the same as the do_simplify counterpart + case Map.get(assumptions, var) do + true -> + do_check_emptiness(y, sorted_assumptions, new_context) + + false -> + do_check_emptiness(n, sorted_assumptions, new_context) + + :dc -> + do_check_emptiness(d, sorted_assumptions, new_context) + + nil -> + assumptions_imply_true = + Engine.check(Map.put(assumptions, var, false)) == :contradiction + + assumptions_imply_false = + Engine.check(Map.put(assumptions, var, true)) == :contradiction + + cond do + assumptions_imply_true and assumptions_imply_false -> + Store.false_node_id() + + assumptions_imply_true -> + do_check_emptiness( + y, + Enum.sort(Map.put(assumptions, var, true)), + new_context + ) + + assumptions_imply_false -> + do_check_emptiness( + n, + Enum.sort(Map.put(assumptions, var, false)), + new_context + ) + + 
true -> + s_y = + do_check_emptiness( + y, + Enum.sort(Map.put(assumptions, var, true)), + new_context + ) + + s_n = + do_check_emptiness( + n, + Enum.sort(Map.put(assumptions, var, false)), + new_context + ) + + s_d = + do_check_emptiness( + d, + Enum.sort(Map.put(assumptions, var, :dc)), + new_context + ) + + Store.find_or_create_node(var, s_y, s_n, s_d) + end + end + end + end + end + end + end + + # This function, containing our previous fix, is correct and does not need to be changed. + defp handle_recursive_subproblem( + algo_type, + sub_key, + # This is a TDD ID for the constraint on the sub-problem. + constraint_id, + node_details, + sorted_assumptions, + # This is the coinductive context (a MapSet). + context + ) do + {var, y, n, d} = node_details + assumptions = Map.new(sorted_assumptions) + + # 1. Build the TDD for the sub-problem's effective type by intersecting all + # its constraints from the current assumption set. + op_intersect = fn + :false_terminal, _ -> :false_terminal + _, :false_terminal -> :false_terminal + t, :true_terminal -> t + :true_terminal, t -> t + end + + sub_problem_constraints = + Enum.filter(assumptions, fn {v, _} -> + (Tdd.Predicate.Info.get_traits(v) || %{})[:sub_key] == sub_key + end) + + sub_problem_tdd_id = + Enum.reduce(sub_problem_constraints, Store.true_node_id(), fn {var, val}, acc_id -> + constraint_for_this_assumption = Engine.unwrap_var(var) + + id_to_intersect = + if val, do: constraint_for_this_assumption, else: negate(constraint_for_this_assumption) + + apply(:intersect, op_intersect, acc_id, id_to_intersect) + end) + + # 2. Check for the three logical outcomes: + # - Does the path imply the constraint is satisfied? + # - Does the path imply the constraint is violated? + # - Or are both outcomes still possible? + + # Implies satisfied: `sub_problem_tdd_id <: constraint_id` + # This is equivalent to `(sub_problem_tdd_id & !constraint_id)` being empty. 
+ neg_constraint_id = negate(constraint_id) + + intersect_sub_with_neg_constraint = + apply(:intersect, op_intersect, sub_problem_tdd_id, neg_constraint_id) + + implies_satisfied = + check_emptiness(intersect_sub_with_neg_constraint) == Store.false_node_id() + + # Implies violated: `sub_problem_tdd_id` and `constraint_id` are disjoint. + # This is equivalent to `(sub_problem_tdd_id & constraint_id)` being empty. + intersect_sub_with_constraint = + apply(:intersect, op_intersect, sub_problem_tdd_id, constraint_id) + + implies_violated = check_emptiness(intersect_sub_with_constraint) == Store.false_node_id() + + # 3. Branch based on the logical outcome. + cond do + implies_satisfied and implies_violated -> + # The sub-problem itself must be empty/impossible under the current assumptions. + # This whole path is a contradiction. + Store.false_node_id() + + implies_satisfied -> + # The constraint is guaranteed by the path. Follow the 'yes' branch. + # The assumptions already imply this, so no change to them is needed. + case algo_type do + :simplify -> do_simplify(y, sorted_assumptions, context) + :check_emptiness -> do_check_emptiness(y, sorted_assumptions, context) + end + + implies_violated -> + # The constraint is impossible given the path. Follow the 'no' branch. + # We can add this new fact `{var, false}` to strengthen the assumptions. + new_assumptions = Map.put(assumptions, var, false) |> Enum.sort() + + case algo_type do + :simplify -> do_simplify(n, new_assumptions, context) + :check_emptiness -> do_check_emptiness(n, new_assumptions, context) + end + + true -> + # Neither outcome is guaranteed. Both are possible. + # We must explore both branches and combine the results. + new_assumptions_for_no_branch = Map.put(assumptions, var, false) |> Enum.sort() + + case algo_type do + :check_emptiness -> + # Is there ANY path to true? Explore both and take their union. 
+ res_y = do_check_emptiness(y, sorted_assumptions, context) + res_n = do_check_emptiness(n, new_assumptions_for_no_branch, context) + + # Define local terminal logic for union + op_union = fn + :true_terminal, _ -> :true_terminal + _, :true_terminal -> :true_terminal + t, :false_terminal -> t + :false_terminal, t -> t + end + + apply(:sum, op_union, res_y, res_n) + + :simplify -> + # Simplify both sub-trees and rebuild the node, as we cannot simplify this node away. + res_y = do_simplify(y, sorted_assumptions, context) + res_n = do_simplify(n, new_assumptions_for_no_branch, context) + # 'dc' branch is independent of 'var' + res_d = do_simplify(d, sorted_assumptions, context) + Store.find_or_create_node(var, res_y, res_n, res_d) + end + end + end +end + +defmodule Tdd.Compiler do + @moduledoc """ + Compiles a `TypeSpec` into a canonical TDD ID. + REFAC: This module now embeds TDD IDs into recursive predicate variables + instead of raw TypeSpecs, a key part of the architectural decoupling. + """ + alias Tdd.TypeSpec + alias Tdd.Variable + alias Tdd.Store + alias Tdd.Algo + alias Tdd.Debug + + @doc "The main public entry point. Takes a spec and returns its TDD ID." + @spec spec_to_id(TypeSpec.t()) :: non_neg_integer() + def spec_to_id(spec) do + # It's crucial to initialize the store for each top-level compilation + # to ensure a clean slate for caches and node IDs. This makes calls independent. + normalized_spec = TypeSpec.normalize(spec) + compile_normalized_spec(normalized_spec, %{}) + end + + defp compile_normalized_spec(normalized_spec, context) do + cache_key = {:spec_to_id, normalized_spec} + + case normalized_spec do + {:type_var, var_name} -> + case Map.get(context, var_name) do + nil -> + raise "Tdd.Compiler: Unbound type variable during TDD compilation: #{inspect(var_name)}. Full spec: #{inspect(normalized_spec)}. 
Context: #{inspect(context)}" + + placeholder_id when is_integer(placeholder_id) -> + placeholder_id + end + + _other_form -> + case Store.get_op_cache(cache_key) do + {:ok, id} -> + id + + :not_found -> + id_to_cache = + case normalized_spec do + {:mu, var_name, body_spec} -> + placeholder_node_variable_tag = {:mu_placeholder_for_var, var_name} + placeholder_id = Store.create_placeholder(placeholder_node_variable_tag) + new_context = Map.put(context, var_name, placeholder_id) + compiled_body_id = compile_normalized_spec(body_spec, new_context) + # The substitution is the "knot-tying" step for recursion + final_id = Algo.substitute(compiled_body_id, placeholder_id, compiled_body_id) + Algo.simplify(final_id) + + other -> + raw_id = do_structural_compile(other, context) + Algo.simplify(raw_id) + end + + Store.put_op_cache(cache_key, id_to_cache) + id_to_cache + end + end + end + + defp do_structural_compile(structural_spec, context) do + case structural_spec do + :any -> + Store.true_node_id() + + :none -> + Store.false_node_id() + + :atom -> + create_base_type_tdd(Variable.v_is_atom()) + + :integer -> + create_base_type_tdd(Variable.v_is_integer()) + + :list -> + create_base_type_tdd(Variable.v_is_list()) + + :tuple -> + create_base_type_tdd(Variable.v_is_tuple()) + + {:literal, val} when is_atom(val) -> + compile_value_equality(:atom, Variable.v_atom_eq(val), context) + + {:literal, val} when is_integer(val) -> + compile_value_equality(:integer, Variable.v_int_eq(val), context) + + {:literal, []} -> + compile_value_equality(:list, Variable.v_list_is_empty(), context) + + {:integer_range, min, max} -> + compile_integer_range(min, max, context) + + {:union, specs} -> + Enum.map(specs, &compile_normalized_spec(&1, context)) + |> Enum.reduce(Store.false_node_id(), fn id, acc -> + Algo.apply(:sum, &op_union_terminals/2, id, acc) + end) + + {:intersect, specs} -> + Enum.map(specs, &compile_normalized_spec(&1, context)) + |> Enum.reduce(Store.true_node_id(), fn id, acc 
-> + Algo.apply(:intersect, &op_intersect_terminals/2, id, acc) + end) + + {:negation, sub_spec} -> + Algo.negate(compile_normalized_spec(sub_spec, context)) + + # REFAC: This is a key change. We now compile sub-specs to TDD IDs + # and embed those IDs in the predicate variables. + {:cons, head_spec, tail_spec} -> + id_list = compile_normalized_spec(:list, context) + id_is_empty = create_base_type_tdd(Variable.v_list_is_empty()) + id_not_is_empty = Algo.negate(id_is_empty) + + non_empty_list_id = + Algo.apply(:intersect, &op_intersect_terminals/2, id_list, id_not_is_empty) + + # 1. Compile sub-specs to get their TDD IDs. + head_id = compile_normalized_spec(head_spec, context) + tail_id = compile_normalized_spec(tail_spec, context) + + # 2. Embed the TDD IDs into the variables. + head_checker_var = Variable.v_list_head_pred(head_id) + head_checker_tdd = create_base_type_tdd(head_checker_var) + + tail_checker_var = Variable.v_list_tail_pred(tail_id) + tail_checker_tdd = create_base_type_tdd(tail_checker_var) + + [non_empty_list_id, head_checker_tdd, tail_checker_tdd] + |> Enum.reduce(Store.true_node_id(), fn id, acc -> + Algo.apply(:intersect, &op_intersect_terminals/2, id, acc) + end) + + # REFAC: Same change for tuples. + {:tuple, elements_specs} -> + size = length(elements_specs) + base_id = compile_normalized_spec(:tuple, context) + size_tdd = create_base_type_tdd(Variable.v_tuple_size_eq(size)) + initial_id = Algo.apply(:intersect, &op_intersect_terminals/2, base_id, size_tdd) + + elements_specs + |> Enum.with_index() + |> Enum.reduce(initial_id, fn {elem_spec, index}, acc_id -> + # 1. Compile element spec to get its TDD ID. + elem_id = compile_normalized_spec(elem_spec, context) + # 2. Embed the TDD ID into the variable. 
+ elem_checker_var = Variable.v_tuple_elem_pred(index, elem_id) + elem_checker_tdd = create_base_type_tdd(elem_checker_var) + + Algo.apply(:intersect, &op_intersect_terminals/2, acc_id, elem_checker_tdd) + end) + + {:type_lambda, _, _} -> + raise "Tdd.Compiler: Cannot compile :type_lambda directly. Spec should be ground. Spec: #{inspect(structural_spec)}" + + {:type_apply, _, _} -> + raise "Tdd.Compiler: Cannot compile :type_apply directly. Spec should be ground and fully beta-reduced. Spec: #{inspect(structural_spec)}" + + _ -> + raise "Tdd.Compiler.do_structural_compile: Unhandled structural spec form: #{inspect(structural_spec)}" + end + end + + defp create_base_type_tdd(var), + do: + Store.find_or_create_node( + var, + Store.true_node_id(), + Store.false_node_id(), + Store.false_node_id() + ) + + defp compile_value_equality(base_type_spec, value_var, context) do + eq_node = create_base_type_tdd(value_var) + base_node_id = compile_normalized_spec(base_type_spec, context) + Algo.apply(:intersect, &op_intersect_terminals/2, base_node_id, eq_node) + end + + defp compile_integer_range(min, max, context) do + base_id = compile_normalized_spec(:integer, context) + + lt_min_tdd = if min != :neg_inf, do: create_base_type_tdd(Variable.v_int_lt(min)) + + gte_min_tdd = + if lt_min_tdd, do: Algo.negate(lt_min_tdd), else: compile_normalized_spec(:any, context) + + id_with_min = Algo.apply(:intersect, &op_intersect_terminals/2, base_id, gte_min_tdd) + + if max == :pos_inf do + id_with_min + else + lt_max_plus_1_tdd = create_base_type_tdd(Variable.v_int_lt(max + 1)) + Algo.apply(:intersect, &op_intersect_terminals/2, id_with_min, lt_max_plus_1_tdd) + end + end + + # --- Terminal Logic Helpers --- + defp op_union_terminals(:true_terminal, _), do: :true_terminal + defp op_union_terminals(_, :true_terminal), do: :true_terminal + defp op_union_terminals(t, :false_terminal), do: t + defp op_union_terminals(:false_terminal, t), do: t + defp op_intersect_terminals(:false_terminal, 
_), do: :false_terminal + defp op_intersect_terminals(_, :false_terminal), do: :false_terminal + defp op_intersect_terminals(t, :true_terminal), do: t + defp op_intersect_terminals(:true_terminal, t), do: t + + # --- Public Subtyping Check --- + @doc "Checks if spec1 is a subtype of spec2 using TDDs." + @spec is_subtype(TypeSpec.t(), TypeSpec.t()) :: boolean + def is_subtype(spec1, spec2) do + id1 = spec_to_id(spec1) + id2 = spec_to_id(spec2) + neg_id2 = Algo.negate(id2) + intersect_id = Algo.apply(:intersect, &op_intersect_terminals/2, id1, neg_id2) + final_id = Algo.check_emptiness(intersect_id) + final_id == Store.false_node_id() end end diff --git a/lib/til/parser.ex b/lib/til/parser.ex index 56bfd6e..e1c4534 100644 --- a/lib/til/parser.ex +++ b/lib/til/parser.ex @@ -193,6 +193,7 @@ defmodule Til.Parser do case parse_atom_datum(source, state, parent_id) do {:ok, node_id, rest, new_state} -> {:ok, node_id, rest, new_state} + {:error, :not_atom} -> # Failed to parse as a specific atom (e.g. ":foo"). # It could be a symbol that starts with ':' (e.g. if we allow ":" as a symbol). @@ -200,9 +201,16 @@ defmodule Til.Parser do case parse_symbol_datum(source, state, parent_id) do {:ok, node_id, rest, new_state} -> {:ok, node_id, rest, new_state} + {:error, :not_symbol} -> # If it started with ':' but wasn't a valid atom and also not a valid symbol - create_error_node_and_advance(source, state, parent_id, 1, "Unknown token starting with ':'") + create_error_node_and_advance( + source, + state, + parent_id, + 1, + "Unknown token starting with ':'" + ) end end @@ -212,18 +220,24 @@ defmodule Til.Parser do case parse_integer_datum(source, state, parent_id) do {:ok, node_id, rest, new_state} -> {:ok, node_id, rest, new_state} + {:error, :not_integer} -> # Not an integer, try parsing as a symbol case parse_symbol_datum(source, state, parent_id) do {:ok, node_id, rest, new_state} -> {:ok, node_id, rest, new_state} + {:error, :not_symbol} -> # Not a symbol either. 
Consume 1 char for the unknown token. create_error_node_and_advance(source, state, parent_id, 1, "Unknown token") end end - end # end inner cond - end # end outer cond + end + + # end inner cond + end + + # end outer cond end # --- Datum Parsing Helpers --- (parse_string_datum, process_string_content) @@ -283,7 +297,7 @@ defmodule Til.Parser do raw_token = opening_tick <> content_segment <> closing_tick rest_of_source = - String.slice(source_after_opening_tick, (idx_closing_tick_in_segment + 1)..-1) + String.slice(source_after_opening_tick, (idx_closing_tick_in_segment + 1)..-1//1) state_at_node_end = advance_pos(initial_state_for_token, raw_token) @@ -339,7 +353,7 @@ defmodule Til.Parser do |> String.length() spaces_to_remove = min(current_leading_spaces_count, strip_indent) - String.slice(line, spaces_to_remove..-1) + String.slice(line, spaces_to_remove..-1//1) end) all_processed_lines = [first_line | processed_rest_lines] @@ -356,9 +370,10 @@ defmodule Til.Parser do # The colon itself is part of the atom's raw string. # The `atom_name_part` is what comes after the colon. case Regex.run(~r/^:([^\s\(\)\[\]\{\}]+)/, source) do - [raw_atom_str, atom_name_part] -> # raw_atom_str is like ":foo", atom_name_part is "foo" + # raw_atom_str is like ":foo", atom_name_part is "foo" + [raw_atom_str, atom_name_part] -> # The regex [^...]+ ensures atom_name_part is not empty. 
- rest_after_atom = String.slice(source, String.length(raw_atom_str)..-1) + rest_after_atom = String.slice(source, String.length(raw_atom_str)..-1//1) start_offset = state.offset start_line = state.line start_col = state.col @@ -387,9 +402,11 @@ defmodule Til.Parser do line: end_line, col: end_col } + {:ok, new_node_id, rest_after_atom, final_state} - _ -> # No match (nil or list that doesn't conform, e.g., just ":" or ": followed by space/delimiter") + # No match (nil or list that doesn't conform, e.g., just ":" or ": followed by space/delimiter") + _ -> {:error, :not_atom} end end @@ -426,7 +443,7 @@ defmodule Til.Parser do # Regex excludes common delimiters. `m{` is handled before symbol parsing. case Regex.run(~r/^([^\s\(\)\[\]\{\}]+)/, source) do [raw_symbol | _] -> - rest_after_symbol = String.slice(source, String.length(raw_symbol)..-1) + rest_after_symbol = String.slice(source, String.length(raw_symbol)..-1//1) start_offset = state.offset start_line = state.line start_col = state.col @@ -492,17 +509,18 @@ defmodule Til.Parser do defp parse_s_expression(original_source_string, source, state, parent_id) do # Standard S-expression parsing via parse_collection - result = parse_collection( - original_source_string, - source, - state, - parent_id, - "(", - ")", - :s_expression, - "Unclosed S-expression", - "Error parsing element in S-expression. Content might be incomplete." - ) + result = + parse_collection( + original_source_string, + source, + state, + parent_id, + "(", + ")", + :s_expression, + "Unclosed S-expression", + "Error parsing element in S-expression. Content might be incomplete." 
+ ) # After parsing, check if it's an 'fn' expression case result do @@ -515,8 +533,7 @@ defmodule Til.Parser do final_state = %{ state_after_collection - | nodes: - Map.put(state_after_collection.nodes, transformed_node.id, transformed_node) + | nodes: Map.put(state_after_collection.nodes, transformed_node.id, transformed_node) } {:ok, transformed_node.id, rest_after_collection, final_state} @@ -548,7 +565,8 @@ defmodule Til.Parser do # into a :lambda_expression node. defp transform_to_lambda_expression(s_expr_node, nodes_map) do # s_expr_node.children = [fn_symbol_id, params_s_expr_id, body_form1_id, ...] - _fn_symbol_id = Enum.at(s_expr_node.children, 0) # Already checked + # Already checked + _fn_symbol_id = Enum.at(s_expr_node.children, 0) if length(s_expr_node.children) < 2 do %{s_expr_node | parsing_error: "Malformed 'fn' expression: missing parameters list."} @@ -557,7 +575,11 @@ defmodule Til.Parser do params_s_expr_node = Map.get(nodes_map, params_s_expr_id) if !(params_s_expr_node && params_s_expr_node.ast_node_type == :s_expression) do - Map.put(s_expr_node, :parsing_error, "Malformed 'fn' expression: parameters list is not an S-expression.") + Map.put( + s_expr_node, + :parsing_error, + "Malformed 'fn' expression: parameters list is not an S-expression." + ) else # Children of the parameters S-expression, e.g. for (fn ((a integer) (b atom) atom) ...), # param_s_expr_children_ids would be IDs of [(a integer), (b atom), atom] @@ -579,33 +601,50 @@ defmodule Til.Parser do all_arg_specs_valid = Enum.all?(arg_spec_node_ids, fn arg_id -> arg_node = Map.get(nodes_map, arg_id) + case arg_node do - %{ast_node_type: :symbol} -> true # e.g. x - %{ast_node_type: :s_expression, children: s_children} -> # e.g. (x integer) + # e.g. x + %{ast_node_type: :symbol} -> + true + + # e.g. 
(x integer) + %{ast_node_type: :s_expression, children: s_children} -> if length(s_children) == 2 do param_sym_node = Map.get(nodes_map, hd(s_children)) type_spec_node = Map.get(nodes_map, hd(tl(s_children))) param_sym_node && param_sym_node.ast_node_type == :symbol && - type_spec_node && (type_spec_node.ast_node_type == :symbol || type_spec_node.ast_node_type == :s_expression) + type_spec_node && + (type_spec_node.ast_node_type == :symbol || + type_spec_node.ast_node_type == :s_expression) else - false # Not a valid (param_symbol type_spec) structure + # Not a valid (param_symbol type_spec) structure + false end - _ -> false # Not a symbol or valid S-expression for arg spec + + # Not a symbol or valid S-expression for arg spec + _ -> + false end end) # Validate return_type_spec_node_id: must be nil or a valid type specifier node return_type_spec_valid = if is_nil(return_type_spec_node_id) do - true # Inferred return type is valid + # Inferred return type is valid + true else ret_type_node = Map.get(nodes_map, return_type_spec_node_id) - ret_type_node && (ret_type_node.ast_node_type == :symbol || ret_type_node.ast_node_type == :s_expression) + + ret_type_node && + (ret_type_node.ast_node_type == :symbol || + ret_type_node.ast_node_type == :s_expression) end if all_arg_specs_valid && return_type_spec_valid do - body_node_ids = Enum.drop(s_expr_node.children, 2) # Body starts after 'fn' and params_s_expr + # Body starts after 'fn' and params_s_expr + body_node_ids = Enum.drop(s_expr_node.children, 2) + Map.merge(s_expr_node, %{ :ast_node_type => :lambda_expression, :params_s_expr_id => params_s_expr_id, @@ -617,10 +656,17 @@ defmodule Til.Parser do # Determine more specific error message error_message = cond do - !all_arg_specs_valid -> "Malformed 'fn' expression: invalid argument specification(s)." - !return_type_spec_valid -> "Malformed 'fn' expression: invalid return type specification." - true -> "Malformed 'fn' expression." 
# Generic fallback + !all_arg_specs_valid -> + "Malformed 'fn' expression: invalid argument specification(s)." + + !return_type_spec_valid -> + "Malformed 'fn' expression: invalid return type specification." + + # Generic fallback + true -> + "Malformed 'fn' expression." end + Map.put(s_expr_node, :parsing_error, error_message) end end @@ -931,7 +977,7 @@ defmodule Til.Parser do [ws | _] = whitespace_match new_offset = o + String.length(ws) {new_line, new_col} = calculate_new_line_col(ws, l, c) - remaining_source = String.slice(source, String.length(ws)..-1) + remaining_source = String.slice(source, String.length(ws)..-1//1) {:ok, remaining_source, %{state | offset: new_offset, line: new_line, col: new_col}} else if String.length(source) == 0 do diff --git a/lib/til/type.ex b/lib/til/type.ex index 3b14ad9..f417181 100644 --- a/lib/til/type.ex +++ b/lib/til/type.ex @@ -1,817 +1,817 @@ -defmodule Tilly.X.Type do - @moduledoc """ - Core type system definitions for Tilly — a Lisp that transpiles to Elixir, - using set-theoretic types represented as Ternary Decision Diagrams (TDDs). - - Supports: - - Set-theoretic types (union, intersection, negation) - - Structural polymorphism with `forall` - - Type constraints (e.g., Enumerable(~a)) - - Structural map types - """ - - # === Monotype TDD Representation === - - defmodule TDD do - @moduledoc """ - Represents a ternary decision diagram node for types. - """ - - defstruct [:decision, :yes, :no, :maybe] - - @type t :: %__MODULE__{ - decision: Tilly.Type.Decision.t(), - yes: TDD.t() | :any | :none, - no: TDD.t() | :any | :none, - maybe: TDD.t() | :any | :none - } - end - - # === Type Variable === - - defmodule Var do - @moduledoc """ - Represents a type variable in a polymorphic type. 
- """ - - defstruct [:name, constraints: []] - - @type t :: %__MODULE__{ - name: String.t(), - constraints: [Tilly.Type.Constraint.t()] - } - end - - # === Structural Map Type === - - defmodule TDDMap do - @moduledoc """ - Structural representation of a map type, with per-key typing and optional openness. - """ - - defstruct fields: [], rest: nil - - @type t :: %__MODULE__{ - fields: [{TDD.t(), TDD.t()}], - rest: TDD.t() | nil - } - end - - @doc """ - Checks if t1 is a subtype of t2 under the current substitution. - t1 <: t2 iff t1 & (not t2) == None - """ - def is_subtype(raw_t1, raw_t2, sub) do - # Use the apply_sub we defined/refined previously - t1 = tdd_substitute(raw_t1, sub) - t2 = tdd_substitute(raw_t2, sub) - - # Handle edge cases with Any and None for robustness - cond do - # None is a subtype of everything - t1 == tdd_none() -> - true - - # Everything is a subtype of Any - t2 == tdd_any() -> - true - - # Any is not a subtype of a specific type (unless that type is also Any) - t1 == tdd_any() and t2 != tdd_any() -> - false - - # A non-None type cannot be a subtype of None - t2 == tdd_none() and t1 != tdd_none() -> - false - - true -> - # The core set-theoretic check: t1 \ t2 == None - tdd_diff(t1, t2) == tdd_none() - - # Alternatively: Type.tdd_and(t1, t2) == t1 (but this can be tricky with complex TDDs if not canonical) - # The difference check is generally more direct for subtyping. - end - end - - # === Type Decisions (Predicates) === - - defmodule Decision do - @moduledoc """ - A type-level decision predicate used in a TDD node. 
- """ - - @type t :: - :is_atom - | :is_integer - | :is_float - | :is_binary - | :is_list - | :is_tuple - | :is_map - | :is_function - | :is_pid - | :is_reference - | {:literal, term()} - | {:tuple_len, pos_integer()} - | {:key, TDD.t()} - | {:has_struct_key, atom()} - | {:var, String.t()} - end - - # === Type Constraints (structural predicates) === - - defmodule Constraint do - @moduledoc """ - Represents a structural constraint on a type variable, - similar to a typeclass in Haskell or trait in Rust, but structural. - """ - - defstruct [:kind, :arg] - - @type kind :: - :enumerable - | :collectable - | :struct_with_keys - | :custom - - @type t :: %__MODULE__{ - kind: kind(), - arg: String.t() | TDD.t() | any() - } - end - - # === Polymorphic Types (forall + constraints) === - - defmodule PolyTDD do - @moduledoc """ - Represents a polymorphic type with optional structural constraints. - """ - - defstruct [:vars, :body] - - @type t :: %__MODULE__{ - vars: [Var.t()], - body: TDD.t() - } - end - - # === Constants for base types === - - @doc "A TDD representing the universal type (any value)" - def tdd_any, do: :any - - @doc "A TDD representing the empty type (no values)" - def tdd_none, do: :none - - @doc "Creates a TDD for a literal value" - def tdd_literal(value) do - %TDD{ - decision: {:literal, value}, - yes: :any, - no: :none, - maybe: :none - } - end - - @doc "Creates a TDD for a base predicate (e.g., is_atom)" - def tdd_pred(pred) when is_atom(pred) do - %TDD{ - decision: pred, - yes: :any, - no: :none, - maybe: :none - } - end - - @doc "Creates a TDD for a type variable reference" - def tdd_var(name) when is_binary(name) do - %TDD{ - decision: {:var, name}, - yes: :any, - no: :none, - maybe: :none - } - end - - @doc """ - Performs type variable substitution in a TDD, - replacing variables found in the given `env` map. 
- """ - def tdd_substitute(:any, _env), do: :any - def tdd_substitute(:none, _env), do: :none - - def tdd_substitute(%TDD{decision: {:var, name}}, env) when is_map(env) do - Map.get(env, name, %TDD{decision: {:var, name}, yes: :any, no: :none, maybe: :none}) - end - - def tdd_substitute(%TDD{} = tdd, env) do - %TDD{ - decision: tdd.decision, - yes: tdd_substitute(tdd.yes, env), - no: tdd_substitute(tdd.no, env), - maybe: tdd_substitute(tdd.maybe, env) - } - end - - @doc """ - Performs substitution in a polymorphic type, replacing all vars - in `vars` with given TDDs from `env`. - """ - def poly_substitute(%PolyTDD{vars: vars, body: body}, env) do - var_names = Enum.map(vars, & &1.name) - restricted_env = Map.take(env, var_names) - tdd_substitute(body, restricted_env) - end - - # === Constraints === - - @doc """ - Checks whether a TDD satisfies a built-in structural constraint, - such as Enumerable or String.Chars. - """ - def satisfies_constraint?(tdd, %Constraint{kind: :enumerable}) do - tdd_is_of_kind?(tdd, [:list, :map, :bitstring]) - end - - def satisfies_constraint?(tdd, %Constraint{kind: :string_chars}) do - tdd_is_of_kind?(tdd, [:bitstring, :atom]) - end - - def satisfies_constraint?(_tdd, %Constraint{kind: :custom}) do - raise "Custom constraints not implemented yet" - end - - # Default fallback: constraint not recognized - def satisfies_constraint?(_tdd, %Constraint{kind: kind}) do - raise ArgumentError, "Unknown constraint kind: #{inspect(kind)}" - end - - @doc """ - Checks if a TDD is semantically a subtype of any of the specified kinds. - Used to approximate constraint satisfaction structurally. 
- """ - def tdd_is_of_kind?(:any, _), do: true - def tdd_is_of_kind?(:none, _), do: false - - def tdd_is_of_kind?(%TDD{decision: pred} = tdd, kinds) do - if pred in kinds do - # Decision directly confirms kind - tdd.yes != :none - else - # Otherwise we conservatively say "no" unless the TDD is union-like - false - end - end - - # === Decision === - defmodule Decision do - @moduledoc """ - A type-level decision predicate used in a TDD node. - """ - - @type t :: - :is_atom - | :is_integer - | :is_float - | :is_binary - | :is_list - | :is_tuple - | :is_map - # General "is a function" - | :is_function - | :is_pid - | :is_reference - | {:literal, term()} - | {:tuple_len, pos_integer()} - # Type of a map key (used in structural map checks) - | {:key, TDD.t()} - | {:has_struct_key, atom()} - # A type variable name, e.g., "~a" - | {:var, String.t()} - # New - | {:is_function_sig, param_types :: [TDD.t()], return_type :: TDD.t()} - end - - @doc "Creates a TDD for a specific function signature" - def tdd_function_sig(param_types, return_type) - when is_list(param_types) and (is_struct(return_type, TDD) or return_type in [:any, :none]) do - %TDD{ - decision: {:is_function_sig, param_types, return_type}, - # A value matches if it's a function of this signature - yes: :any, - no: :none, - # Maybe it's some other function - maybe: %TDD{decision: :is_function, yes: :any, no: :none, maybe: :none} - } - end - - # ... (existing tdd_or, tdd_and, tdd_not, tdd_diff) ... - - @doc """ - Performs type variable substitution in a TDD, - replacing variables found in the given `env` map (var_name -> TDD). - """ - def tdd_substitute(:any, _env), do: :any - def tdd_substitute(:none, _env), do: :none - - def tdd_substitute(%TDD{decision: {:var, name}} = tdd, env) when is_map(env) do - # If var 'name' is in env, substitute it. Otherwise, keep the var. 
- Map.get(env, name, tdd) - end - - def tdd_substitute(%TDD{decision: {:is_function_sig, params, ret_type}} = tdd, env) do - # Substitute within the signature parts - substituted_params = Enum.map(params, &tdd_substitute(&1, env)) - substituted_ret_type = tdd_substitute(ret_type, env) - - # Reconstruct the TDD node, keeping yes/no/maybe branches as they are fixed for this predicate. - # Note: If canonicalization (mk_tdd) were used, this would go through it. - %TDD{tdd | decision: {:is_function_sig, substituted_params, substituted_ret_type}} - end - - def tdd_substitute(%TDD{decision: {:key, key_type_tdd}} = tdd, env) do - # Substitute within the key type TDD - substituted_key_type = tdd_substitute(key_type_tdd, env) - %TDD{tdd | decision: {:key, substituted_key_type}} - end - - # Generic case for other decisions: substitute in branches - def tdd_substitute(%TDD{} = tdd, env) do - %TDD{ - # Assume decision itself doesn't contain substitutable vars unless handled above - decision: tdd.decision, - yes: tdd_substitute(tdd.yes, env), - no: tdd_substitute(tdd.no, env), - maybe: tdd_substitute(tdd.maybe, env) - } - end - - @doc """ - Performs substitution in a polymorphic type's body, - using the provided `env` (var_name -> TDD). - This substitutes *free* variables in the PolyTDD's body, not its quantified variables. - To instantiate quantified variables, use `Tilly.Inference.instantiate/3`. - """ - def poly_substitute_free_vars(%PolyTDD{vars: _quantified_vars, body: body} = poly_tdd, env) do - # We only substitute variables in the body that are NOT the quantified ones. - # `env` should ideally not contain keys that are names of quantified variables of this PolyTDD. - # For simplicity, if env has a quantified var name, it will be shadowed by the quantified var itself. - # A more robust approach might filter env based on quantified_vars. 
- substituted_body = tdd_substitute(body, env) - %PolyTDD{poly_tdd | body: substituted_body} - end - - @doc "Finds all free type variable names in a TDD." - def free_vars(:any), do: MapSet.new() - def free_vars(:none), do: MapSet.new() - - def free_vars(%TDD{decision: {:var, name}}) do - MapSet.new([name]) - end - - def free_vars(%TDD{decision: {:is_function_sig, params, ret_type}}) do - param_fvs = Enum.map(params, &free_vars/1) |> Enum.reduce(MapSet.new(), &MapSet.union/2) - ret_fvs = free_vars(ret_type) - MapSet.union(param_fvs, ret_fvs) - # Note: yes/no/maybe branches for this node are typically :any/:none or simple predicates, - # but if they could contain vars, they'd need to be included. - # Current tdd_function_sig has fixed branches. - end - - def free_vars(%TDD{decision: {:key, key_type_tdd}}) do - free_vars(key_type_tdd) - # Similar note about yes/no/maybe branches. - end - - def free_vars(%TDD{yes: yes, no: no, maybe: maybe}) do - MapSet.union(free_vars(yes), MapSet.union(free_vars(no), free_vars(maybe))) - end - - # Helper for PolyTDD free vars (vars free in body that are not quantified) - def free_vars_in_poly_tdd_body(%PolyTDD{vars: quantified_vars_list, body: body}) do - quantified_names = Enum.map(quantified_vars_list, & &1.name) |> MapSet.new() - body_fvs = free_vars(body) - MapSet.difference(body_fvs, quantified_names) - end -end - -defmodule Tilly.Inference do - alias Tilly.Type - alias Tilly.Type.{TDD, Var, PolyTDD, Constraint} - - @typedoc "Type environment: maps variable names (atoms) to their types (TDD or PolyTDD)" - @type type_env :: %{atom() => TDD.t() | PolyTDD.t()} - - @typedoc "Substitution map: maps type variable names (strings) to TDDs" - @type substitution :: %{String.t() => TDD.t()} - - @typedoc "Constraints collected during inference: {type_var_name, constraint}" - @type collected_constraints :: [{String.t(), Constraint.t()}] - - @typedoc """ - Result of inference for an expression: - - inferred_type: The TDD or PolyTDD type of 
the expression. - - var_counter: The updated counter for generating fresh type variables. - - substitution: The accumulated substitution map. - - constraints: Constraints that need to be satisfied. - """ - @type infer_result :: - {inferred_type :: TDD.t() | PolyTDD.t(), var_counter :: non_neg_integer(), - substitution :: substitution(), constraints :: collected_constraints()} - - # --- Helper for Fresh Type Variables --- - defmodule FreshVar do - @doc "Generates a new type variable name and increments the counter." - @spec next(non_neg_integer()) :: {String.t(), non_neg_integer()} - def next(counter) do - new_var_name = "~t" <> Integer.to_string(counter) - {new_var_name, counter + 1} - end - end - - # --- Core Inference Function --- - - @doc "Infers the type of a Tilly expression." - @spec infer( - expr :: term(), - env :: type_env(), - var_counter :: non_neg_integer(), - sub :: substitution() - ) :: - infer_result() - def infer({:lit, val}, _env, var_counter, sub) do - type = - cond do - # More precise: Type.tdd_literal(val) - is_atom(val) -> Type.tdd_pred(:is_atom) - # Type.tdd_literal(val) - is_integer(val) -> Type.tdd_pred(:is_integer) - # Type.tdd_literal(val) - is_float(val) -> Type.tdd_pred(:is_float) - # Type.tdd_literal(val) - is_binary(val) -> Type.tdd_pred(:is_binary) - # Add other literals as needed - # Fallback for other kinds of literals - true -> Type.tdd_literal(val) - end - - {type, var_counter, sub, []} - end - - def infer({:var, name}, env, var_counter, sub) when is_atom(name) do - case Map.get(env, name) do - nil -> - raise "Unbound variable: #{name}" - - %TDD{} = tdd_type -> - {Type.tdd_substitute(tdd_type, sub), var_counter, sub, []} - - %PolyTDD{} = poly_type -> - {instantiated_type, new_var_counter, new_constraints} = - instantiate(poly_type, var_counter) - - # Apply current substitution to the instantiated type - # (in case fresh vars from instantiation are already in sub from elsewhere) - final_type = Type.tdd_substitute(instantiated_type, 
sub) - {final_type, new_var_counter, sub, new_constraints} - end - end - - def infer({:fn, param_atoms, body_expr}, env, var_counter, sub) when is_list(param_atoms) do - # 1. Create fresh type variables for parameters - {param_tdd_vars, extended_env, counter_after_params} = - Enum.reduce(param_atoms, {[], env, var_counter}, fn param_name, - {vars_acc, env_acc, c_acc} -> - {fresh_var_name, next_c} = FreshVar.next(c_acc) - param_tdd_var = Type.tdd_var(fresh_var_name) - {[param_tdd_var | vars_acc], Map.put(env_acc, param_name, param_tdd_var), next_c} - end) - - param_types = Enum.reverse(param_tdd_vars) - - # 2. Infer body with extended environment and current substitution - {body_type_raw, counter_after_body, sub_after_body, body_constraints} = - infer(body_expr, extended_env, counter_after_params, sub) - - # 3. Apply the substitution from body inference to parameter types - # This is because unification within the body might refine what the param types can be. - final_param_types = Enum.map(param_types, &Type.tdd_substitute(&1, sub_after_body)) - # Already applied in infer usually - final_body_type = Type.tdd_substitute(body_type_raw, sub_after_body) - - # 4. Construct function type - fun_type = Type.tdd_function_sig(final_param_types, final_body_type) - {fun_type, counter_after_body, sub_after_body, body_constraints} - end - - def infer({:app, fun_expr, arg_exprs}, env, var_counter, sub) when is_list(arg_exprs) do - # 1. Infer function expression - {fun_type_raw, c1, s1, fun_constraints} = infer(fun_expr, env, var_counter, sub) - # Apply substitutions so far - fun_type_template = Type.tdd_substitute(fun_type_raw, s1) - - # 2. 
Infer argument expressions - {arg_types_raw, c2, s2, args_constraints_lists} = - Enum.map_reduce(arg_exprs, {c1, s1}, fn arg_expr, {c_acc, s_acc} -> - {arg_t, next_c, next_s, arg_c} = infer(arg_expr, env, c_acc, s_acc) - # Pass along type and its constraints - {{arg_t, arg_c}, {next_c, next_s}} - end) - - actual_arg_types = Enum.map(arg_types_raw, fn {t, _cs} -> Type.tdd_substitute(t, s2) end) - all_arg_constraints = Enum.flat_map(arg_types_raw, fn {_t, cs} -> cs end) ++ fun_constraints - - # 3. Unify/Match function type with arguments - # `fun_type_template` is the type of the function (e.g., {:var, "~f"} or an actual fn_sig) - # `s2` is the current global substitution. - {return_type_final, c3, s3, unification_constraints} = - unify_apply(fun_type_template, actual_arg_types, c2, s2) - - {return_type_final, c3, s3, all_arg_constraints ++ unification_constraints} - end - - def infer({:let, [{var_name, val_expr}], body_expr}, env, var_counter, sub) do - # 1. Infer the type of the value expression - {val_type_raw, c1, s1, val_constraints} = infer(val_expr, env, var_counter, sub) - - # 2. Apply current substitution and generalize the value's type - # Generalization happens *before* adding to env, over variables free in val_type but not env. - # The substitution `s1` contains all refinements up to this point. - val_type_substituted = Type.tdd_substitute(val_type_raw, s1) - generalized_val_type = generalize(val_type_substituted, env, s1) - - # 3. Extend environment and infer body - extended_env = Map.put(env, var_name, generalized_val_type) - # Use s1 for body too - {body_type_raw, c2, s2, body_constraints} = infer(body_expr, extended_env, c1, s1) - - # The final substitution s2 incorporates s1 and any changes from body. - # The final body_type is already substituted by s2. 
- {body_type_raw, c2, s2, val_constraints ++ body_constraints} - end - - # --- Polymorphism: Instantiation and Generalization --- - - @doc "Instantiates a polymorphic type scheme by replacing quantified variables with fresh ones." - def instantiate(%PolyTDD{vars: poly_vars_list, body: body_tdd}, var_counter) do - # Create substitution map from quantified vars to fresh vars - {substitution_to_fresh, new_var_counter, new_constraints} = - Enum.reduce(poly_vars_list, {%{}, var_counter, []}, fn %Var{ - name: q_name, - constraints: q_constraints - }, - {sub_acc, c_acc, cons_acc} -> - {fresh_name, next_c} = FreshVar.next(c_acc) - fresh_tdd_var = Type.tdd_var(fresh_name) - # Associate constraints of the quantified var with the new fresh var - # Tie constraint to fresh var name - fresh_var_constraints = Enum.map(q_constraints, &%Constraint{&1 | arg: fresh_name}) - {Map.put(sub_acc, q_name, fresh_tdd_var), next_c, cons_acc ++ fresh_var_constraints} - end) - - instantiated_body = Type.tdd_substitute(body_tdd, substitution_to_fresh) - {instantiated_body, new_var_counter, new_constraints} - end - - @doc "Generalizes a TDD type into a PolyTDD if it has free variables not in the environment." - def generalize(type_tdd, env, current_sub) do - # Apply current substitution to resolve any vars in type_tdd that are already determined - type_to_generalize = Type.tdd_substitute(type_tdd, current_sub) - - env_free_vars = - env - |> Map.values() - |> Enum.map(&apply_sub_and_get_free_vars(&1, current_sub)) - |> Enum.reduce(MapSet.new(), &MapSet.union/2) - - type_free_vars_set = Type.free_vars(type_to_generalize) - - vars_to_quantify_names = MapSet.difference(type_free_vars_set, env_free_vars) - - if MapSet.size(vars_to_quantify_names) == 0 do - # No variables to quantify, return as is - type_to_generalize - else - quantified_vars_structs = - Enum.map(MapSet.to_list(vars_to_quantify_names), fn var_name -> - # For now, generalized variables have no attached constraints here. 
- # Constraints arise from usage and are checked later. - %Var{name: var_name, constraints: []} - end) - - %PolyTDD{vars: quantified_vars_structs, body: type_to_generalize} - end - end - - defp apply_sub_and_get_free_vars(%TDD{} = tdd, sub) do - Type.tdd_substitute(tdd, sub) |> Type.free_vars() - end - - defp apply_sub_and_get_free_vars(%PolyTDD{} = poly_tdd, sub) do - # For a PolyTDD in the env, we care about its free variables *after* substitution, - # excluding its own quantified variables. - # Substitutes free vars in body - Type.poly_substitute_free_vars(poly_tdd, sub) - |> Type.free_vars_in_poly_tdd_body() - end - - # --- Unification (Simplified for now) --- - - @doc """ - Constrains variables in t1 and t2 to be compatible and updates the substitution. - If t1 is Var(~a) and t2 is Type T, then ~a's bound becomes current_bound(~a) & T. - If t1 and t2 are concrete, checks their intersection isn't None. - Returns new substitution. Throws on error. - """ - def constrain_and_update_sub(raw_t1, raw_t2, sub) do - # IO.inspect({:constrain_start, raw_t1, raw_t2, sub}, label: "CONSTRAIN") - t1 = tdd_substitute(raw_t1, sub) - t2 = tdd_substitute(raw_t2, sub) - # IO.inspect({:constrain_applied, t1, t2}, label: "CONSTRAIN") - - cond do - # Identical or one is Any (Any & T = T, so effectively no new constraint on T if T is a var already refined from Any) - t1 == t2 -> - sub - - # Effectively constrains t2 if it's a var - t1 == Type.tdd_any() -> - constrain_var_with_type(t2, t1, sub) - - # Effectively constrains t1 if it's a var - t2 == Type.tdd_any() -> - constrain_var_with_type(t1, t2, sub) - - # Case 1: t1 is a variable - %TDD{decision: {:var, v_name1}} = t1 -> - update_var_bound(v_name1, t2, sub, raw_t1, raw_t2) - - # Case 2: t2 is a variable (and t1 is not) - %TDD{decision: {:var, v_name2}} = t2 -> - # Note order for error message - update_var_bound(v_name2, t1, sub, raw_t2, raw_t1) - - # Case 3: Both are function signatures (concrete) - %TDD{decision: 
{:is_function_sig, params1, ret1}} = t1, - %TDD{decision: {:is_function_sig, params2, ret2}} = t2 -> - if length(params1) != length(params2) do - raise "Type error (constrain): Function arity mismatch between #{inspect(t1)} and #{inspect(t2)}" - end - - # For two function *types* to be compatible/substitutable, their parameters are contravariant, return is covariant. - # However, if we are "unifying" them to be *the same type structure*, then params are covariant. - # Let's assume for now `constrain_and_update_sub` implies they should be "equal or compatible via intersection". - # This means their intersection should be non-None, and vars within them get constrained. - - sub_after_params = - Enum.zip(params1, params2) - |> Enum.reduce(sub, fn {p1, p2}, acc_sub -> - # Params are "unified" directly - constrain_and_update_sub(p1, p2, acc_sub) - end) - - # Return types are "unified" directly - constrain_and_update_sub(ret1, ret2, sub_after_params) - - # TODO: Add cases for Tuples, Lists, TDDMap - # For tuples: length must match, then constrain_and_update_sub elements pairwise. - # %TDD{decision: {:is_tuple, len1}, yes: elements_tdd1} ... - # This requires TDDs to encode tuple elements more directly if we want to unify structurally. - # Current TDD for tuple is just {:tuple_len, N} or general :is_tuple. We need richer TDDs for structural unification. - # For now, this fallback will handle simple tuple predicates. - - # Case 4: Other concrete types. - true -> - intersection = tdd_and(t1, t2) - - if intersection == Type.tdd_none() do - raise "Type error (constrain): Types #{inspect(t1)} (from #{inspect(raw_t1)}) and #{inspect(t2)} (from #{inspect(raw_t2)}) are incompatible (intersection is empty). Current sub: #{inspect(sub)}" - end - - # If they are concrete and compatible, `sub` is unchanged at this level. 
- sub - end - - defp constrain_var_with_type(%TDD{decision: {:var, v_name}} = var_tdd, other_type, sub) do - # raw_t1, raw_t2 are for error msg context - update_var_bound(v_name, other_type, sub, var_tdd, other_type) - end - - # No var, no sub change here - defp constrain_var_with_type(_concrete_type, _other_type, sub), do: sub - - defp update_var_bound(v_name, constraining_type, sub, raw_var_form, raw_constraining_form) do - # Default to Any - current_bound_v = Map.get(sub, v_name, Type.tdd_any()) - new_bound_v = Type.tdd_and(current_bound_v, constraining_type) - - if new_bound_v == Type.tdd_none() do - original_var_constraint_str = - if raw_var_form != constraining_type, - do: "(from unifying with #{inspect(raw_constraining_form)})", - else: "" - - raise "Type error: Constraining variable #{v_name} with #{inspect(constraining_type)} #{original_var_constraint_str} results in an empty type. Previous bound: #{inspect(current_bound_v)}. Current sub: #{inspect(sub)}" - end - - Map.put(sub, v_name, new_bound_v) - end - - @doc """ - Handles the application of a function type to actual argument types. - `fun_type_template` is the (possibly variable) type of the function. - `actual_arg_types` are the TDDs of the arguments. - `var_counter` and `sub` are current state. - Returns `{final_return_type, new_counter, new_sub, new_constraints}`. - """ - def unify_apply(fun_type_template, actual_arg_types, var_counter, sub) do - # Apply current substitutions to fun_type_template - current_fun_type = Type.tdd_substitute(fun_type_template, sub) - - case current_fun_type do - %TDD{decision: {:var, fun_var_name}} -> - # Function is a type variable. We need to unify it with a newly minted function signature. 
- {param_var_tds, c1} = - Enum.map_reduce(actual_arg_types, var_counter, fn _arg, c_acc -> - {fresh_name, next_c} = FreshVar.next(c_acc) - {Type.tdd_var(fresh_name), next_c} - end) - - {return_var_name, c2} = FreshVar.next(c1) - return_var_tdd = Type.tdd_var(return_var_name) - - # The new signature that fun_var_name must conform to - synthetic_fun_sig_tdd = Type.tdd_function_sig(param_var_tds, return_var_tdd) - - # Unify the function variable with this synthetic signature - {s1, cons1} = unify(current_fun_type, synthetic_fun_sig_tdd, sub) - - # Now unify actual arguments with the fresh parameter type variables - {s2, cons2_list} = - Enum.zip(actual_arg_types, param_var_tds) - |> Enum.reduce({s1, []}, fn {actual_arg_t, param_var_t}, {s_acc, c_acc_list} -> - {next_s, next_cs} = unify(actual_arg_t, param_var_t, s_acc) - {next_s, [next_cs | c_acc_list]} - end) - - final_return_type = Type.tdd_substitute(return_var_tdd, s2) - {final_return_type, c2, s2, cons1 ++ List.flatten(cons2_list)} - - %TDD{decision: {:is_function_sig, expected_param_types, expected_return_type}} -> - # Function is a known signature. - if length(actual_arg_types) != length(expected_param_types) do - raise "Arity mismatch: expected #{length(expected_param_types)}, got #{length(actual_arg_types)}" - end - - # Unify actual arguments with expected parameter types - {s1, constraints_from_params_list} = - Enum.zip(actual_arg_types, expected_param_types) - |> Enum.reduce({sub, []}, fn {actual_arg_t, expected_param_t}, {s_acc, c_acc_list} -> - {next_s, param_cs} = unify(actual_arg_t, expected_param_t, s_acc) - {next_s, [param_cs | c_acc_list]} - end) - - final_return_type = Type.tdd_substitute(expected_return_type, s1) - {final_return_type, var_counter, s1, List.flatten(constraints_from_params_list)} - - other_type -> - raise "Type error: expected a function, but got #{inspect(other_type)}" - end - end - - @doc "Top-level type checking function for a Tilly program (list of expressions)." 
- def typecheck_program(exprs, initial_env \\ %{}) do - # For a program, we can infer each top-level expression. - # For `def`s, they would add to the environment. - # For now, let's just infer a single expression. - # A real program would involve modules, defs, etc. - initial_var_counter = 0 - initial_substitution = %{} - - # This is a simplified entry point, inferring a single expression - # A full program checker would iterate, manage top-level defs, etc. - if is_list(exprs) and Enum.count(exprs) == 1 do - [main_expr] = exprs - - {raw_type, _counter, final_sub, constraints} = - infer(main_expr, initial_env, initial_var_counter, initial_substitution) - - final_type = Type.tdd_substitute(raw_type, final_sub) - # Here, you would solve/check `constraints` using `final_sub` - # For example: - Enum.each(constraints, fn {var_name, constraint_obj} -> - var_final_type = Map.get(final_sub, var_name, Type.tdd_var(var_name)) - - unless Type.satisfies_constraint?(var_final_type, constraint_obj) do - raise "Constraint #{inspect(constraint_obj)} not satisfied for #{var_name} (type #{inspect(var_final_type)})" - end - end) - - {:ok, final_type, final_sub} - else - # Placeholder for multi-expression program handling - {:error, "Program must be a single expression for now"} - end - end - end -end +# defmodule Tilly.X.Type do +# @moduledoc """ +# Core type system definitions for Tilly — a Lisp that transpiles to Elixir, +# using set-theoretic types represented as Ternary Decision Diagrams (TDDs). +# +# Supports: +# - Set-theoretic types (union, intersection, negation) +# - Structural polymorphism with `forall` +# - Type constraints (e.g., Enumerable(~a)) +# - Structural map types +# """ +# +# # === Monotype TDD Representation === +# +# defmodule TDD do +# @moduledoc """ +# Represents a ternary decision diagram node for types. 
+# """ +# +# defstruct [:decision, :yes, :no, :maybe] +# +# @type t :: %__MODULE__{ +# decision: Tilly.Type.Decision.t(), +# yes: TDD.t() | :any | :none, +# no: TDD.t() | :any | :none, +# maybe: TDD.t() | :any | :none +# } +# end +# +# # === Type Variable === +# +# defmodule Var do +# @moduledoc """ +# Represents a type variable in a polymorphic type. +# """ +# +# defstruct [:name, constraints: []] +# +# @type t :: %__MODULE__{ +# name: String.t(), +# constraints: [Tilly.Type.Constraint.t()] +# } +# end +# +# # === Structural Map Type === +# +# defmodule TDDMap do +# @moduledoc """ +# Structural representation of a map type, with per-key typing and optional openness. +# """ +# +# defstruct fields: [], rest: nil +# +# @type t :: %__MODULE__{ +# fields: [{TDD.t(), TDD.t()}], +# rest: TDD.t() | nil +# } +# end +# +# @doc """ +# Checks if t1 is a subtype of t2 under the current substitution. +# t1 <: t2 iff t1 & (not t2) == None +# """ +# def is_subtype(raw_t1, raw_t2, sub) do +# # Use the apply_sub we defined/refined previously +# t1 = tdd_substitute(raw_t1, sub) +# t2 = tdd_substitute(raw_t2, sub) +# +# # Handle edge cases with Any and None for robustness +# cond do +# # None is a subtype of everything +# t1 == tdd_none() -> +# true +# +# # Everything is a subtype of Any +# t2 == tdd_any() -> +# true +# +# # Any is not a subtype of a specific type (unless that type is also Any) +# t1 == tdd_any() and t2 != tdd_any() -> +# false +# +# # A non-None type cannot be a subtype of None +# t2 == tdd_none() and t1 != tdd_none() -> +# false +# +# true -> +# # The core set-theoretic check: t1 \ t2 == None +# tdd_diff(t1, t2) == tdd_none() +# +# # Alternatively: Type.tdd_and(t1, t2) == t1 (but this can be tricky with complex TDDs if not canonical) +# # The difference check is generally more direct for subtyping. +# end +# end +# +# # === Type Decisions (Predicates) === +# +# defmodule Decision do +# @moduledoc """ +# A type-level decision predicate used in a TDD node. 
+# """ +# +# @type t :: +# :is_atom +# | :is_integer +# | :is_float +# | :is_binary +# | :is_list +# | :is_tuple +# | :is_map +# | :is_function +# | :is_pid +# | :is_reference +# | {:literal, term()} +# | {:tuple_len, pos_integer()} +# | {:key, TDD.t()} +# | {:has_struct_key, atom()} +# | {:var, String.t()} +# end +# +# # === Type Constraints (structural predicates) === +# +# defmodule Constraint do +# @moduledoc """ +# Represents a structural constraint on a type variable, +# similar to a typeclass in Haskell or trait in Rust, but structural. +# """ +# +# defstruct [:kind, :arg] +# +# @type kind :: +# :enumerable +# | :collectable +# | :struct_with_keys +# | :custom +# +# @type t :: %__MODULE__{ +# kind: kind(), +# arg: String.t() | TDD.t() | any() +# } +# end +# +# # === Polymorphic Types (forall + constraints) === +# +# defmodule PolyTDD do +# @moduledoc """ +# Represents a polymorphic type with optional structural constraints. +# """ +# +# defstruct [:vars, :body] +# +# @type t :: %__MODULE__{ +# vars: [Var.t()], +# body: TDD.t() +# } +# end +# +# # === Constants for base types === +# +# @doc "A TDD representing the universal type (any value)" +# def tdd_any, do: :any +# +# @doc "A TDD representing the empty type (no values)" +# def tdd_none, do: :none +# +# @doc "Creates a TDD for a literal value" +# def tdd_literal(value) do +# %TDD{ +# decision: {:literal, value}, +# yes: :any, +# no: :none, +# maybe: :none +# } +# end +# +# @doc "Creates a TDD for a base predicate (e.g., is_atom)" +# def tdd_pred(pred) when is_atom(pred) do +# %TDD{ +# decision: pred, +# yes: :any, +# no: :none, +# maybe: :none +# } +# end +# +# @doc "Creates a TDD for a type variable reference" +# def tdd_var(name) when is_binary(name) do +# %TDD{ +# decision: {:var, name}, +# yes: :any, +# no: :none, +# maybe: :none +# } +# end +# +# @doc """ +# Performs type variable substitution in a TDD, +# replacing variables found in the given `env` map. 
+# """ +# def tdd_substitute(:any, _env), do: :any +# def tdd_substitute(:none, _env), do: :none +# +# def tdd_substitute(%TDD{decision: {:var, name}}, env) when is_map(env) do +# Map.get(env, name, %TDD{decision: {:var, name}, yes: :any, no: :none, maybe: :none}) +# end +# +# def tdd_substitute(%TDD{} = tdd, env) do +# %TDD{ +# decision: tdd.decision, +# yes: tdd_substitute(tdd.yes, env), +# no: tdd_substitute(tdd.no, env), +# maybe: tdd_substitute(tdd.maybe, env) +# } +# end +# +# @doc """ +# Performs substitution in a polymorphic type, replacing all vars +# in `vars` with given TDDs from `env`. +# """ +# def poly_substitute(%PolyTDD{vars: vars, body: body}, env) do +# var_names = Enum.map(vars, & &1.name) +# restricted_env = Map.take(env, var_names) +# tdd_substitute(body, restricted_env) +# end +# +# # === Constraints === +# +# @doc """ +# Checks whether a TDD satisfies a built-in structural constraint, +# such as Enumerable or String.Chars. +# """ +# def satisfies_constraint?(tdd, %Constraint{kind: :enumerable}) do +# tdd_is_of_kind?(tdd, [:list, :map, :bitstring]) +# end +# +# def satisfies_constraint?(tdd, %Constraint{kind: :string_chars}) do +# tdd_is_of_kind?(tdd, [:bitstring, :atom]) +# end +# +# def satisfies_constraint?(_tdd, %Constraint{kind: :custom}) do +# raise "Custom constraints not implemented yet" +# end +# +# # Default fallback: constraint not recognized +# def satisfies_constraint?(_tdd, %Constraint{kind: kind}) do +# raise ArgumentError, "Unknown constraint kind: #{inspect(kind)}" +# end +# +# @doc """ +# Checks if a TDD is semantically a subtype of any of the specified kinds. +# Used to approximate constraint satisfaction structurally. 
+# """ +# def tdd_is_of_kind?(:any, _), do: true +# def tdd_is_of_kind?(:none, _), do: false +# +# def tdd_is_of_kind?(%TDD{decision: pred} = tdd, kinds) do +# if pred in kinds do +# # Decision directly confirms kind +# tdd.yes != :none +# else +# # Otherwise we conservatively say "no" unless the TDD is union-like +# false +# end +# end +# +# # === Decision === +# defmodule Decision do +# @moduledoc """ +# A type-level decision predicate used in a TDD node. +# """ +# +# @type t :: +# :is_atom +# | :is_integer +# | :is_float +# | :is_binary +# | :is_list +# | :is_tuple +# | :is_map +# # General "is a function" +# | :is_function +# | :is_pid +# | :is_reference +# | {:literal, term()} +# | {:tuple_len, pos_integer()} +# # Type of a map key (used in structural map checks) +# | {:key, TDD.t()} +# | {:has_struct_key, atom()} +# # A type variable name, e.g., "~a" +# | {:var, String.t()} +# # New +# | {:is_function_sig, param_types :: [TDD.t()], return_type :: TDD.t()} +# end +# +# @doc "Creates a TDD for a specific function signature" +# def tdd_function_sig(param_types, return_type) +# when is_list(param_types) and (is_struct(return_type, TDD) or return_type in [:any, :none]) do +# %TDD{ +# decision: {:is_function_sig, param_types, return_type}, +# # A value matches if it's a function of this signature +# yes: :any, +# no: :none, +# # Maybe it's some other function +# maybe: %TDD{decision: :is_function, yes: :any, no: :none, maybe: :none} +# } +# end +# +# # ... (existing tdd_or, tdd_and, tdd_not, tdd_diff) ... +# +# @doc """ +# Performs type variable substitution in a TDD, +# replacing variables found in the given `env` map (var_name -> TDD). +# """ +# def tdd_substitute(:any, _env), do: :any +# def tdd_substitute(:none, _env), do: :none +# +# def tdd_substitute(%TDD{decision: {:var, name}} = tdd, env) when is_map(env) do +# # If var 'name' is in env, substitute it. Otherwise, keep the var. 
+# Map.get(env, name, tdd) +# end +# +# def tdd_substitute(%TDD{decision: {:is_function_sig, params, ret_type}} = tdd, env) do +# # Substitute within the signature parts +# substituted_params = Enum.map(params, &tdd_substitute(&1, env)) +# substituted_ret_type = tdd_substitute(ret_type, env) +# +# # Reconstruct the TDD node, keeping yes/no/maybe branches as they are fixed for this predicate. +# # Note: If canonicalization (mk_tdd) were used, this would go through it. +# %TDD{tdd | decision: {:is_function_sig, substituted_params, substituted_ret_type}} +# end +# +# def tdd_substitute(%TDD{decision: {:key, key_type_tdd}} = tdd, env) do +# # Substitute within the key type TDD +# substituted_key_type = tdd_substitute(key_type_tdd, env) +# %TDD{tdd | decision: {:key, substituted_key_type}} +# end +# +# # Generic case for other decisions: substitute in branches +# def tdd_substitute(%TDD{} = tdd, env) do +# %TDD{ +# # Assume decision itself doesn't contain substitutable vars unless handled above +# decision: tdd.decision, +# yes: tdd_substitute(tdd.yes, env), +# no: tdd_substitute(tdd.no, env), +# maybe: tdd_substitute(tdd.maybe, env) +# } +# end +# +# @doc """ +# Performs substitution in a polymorphic type's body, +# using the provided `env` (var_name -> TDD). +# This substitutes *free* variables in the PolyTDD's body, not its quantified variables. +# To instantiate quantified variables, use `Tilly.Inference.instantiate/3`. +# """ +# def poly_substitute_free_vars(%PolyTDD{vars: _quantified_vars, body: body} = poly_tdd, env) do +# # We only substitute variables in the body that are NOT the quantified ones. +# # `env` should ideally not contain keys that are names of quantified variables of this PolyTDD. +# # For simplicity, if env has a quantified var name, it will be shadowed by the quantified var itself. +# # A more robust approach might filter env based on quantified_vars. 
+# substituted_body = tdd_substitute(body, env) +# %PolyTDD{poly_tdd | body: substituted_body} +# end +# +# @doc "Finds all free type variable names in a TDD." +# def free_vars(:any), do: MapSet.new() +# def free_vars(:none), do: MapSet.new() +# +# def free_vars(%TDD{decision: {:var, name}}) do +# MapSet.new([name]) +# end +# +# def free_vars(%TDD{decision: {:is_function_sig, params, ret_type}}) do +# param_fvs = Enum.map(params, &free_vars/1) |> Enum.reduce(MapSet.new(), &MapSet.union/2) +# ret_fvs = free_vars(ret_type) +# MapSet.union(param_fvs, ret_fvs) +# # Note: yes/no/maybe branches for this node are typically :any/:none or simple predicates, +# # but if they could contain vars, they'd need to be included. +# # Current tdd_function_sig has fixed branches. +# end +# +# def free_vars(%TDD{decision: {:key, key_type_tdd}}) do +# free_vars(key_type_tdd) +# # Similar note about yes/no/maybe branches. +# end +# +# def free_vars(%TDD{yes: yes, no: no, maybe: maybe}) do +# MapSet.union(free_vars(yes), MapSet.union(free_vars(no), free_vars(maybe))) +# end +# +# # Helper for PolyTDD free vars (vars free in body that are not quantified) +# def free_vars_in_poly_tdd_body(%PolyTDD{vars: quantified_vars_list, body: body}) do +# quantified_names = Enum.map(quantified_vars_list, & &1.name) |> MapSet.new() +# body_fvs = free_vars(body) +# MapSet.difference(body_fvs, quantified_names) +# end +# end +# +# defmodule Tilly.Inference do +# alias Tilly.Type +# alias Tilly.Type.{TDD, Var, PolyTDD, Constraint} +# +# @typedoc "Type environment: maps variable names (atoms) to their types (TDD or PolyTDD)" +# @type type_env :: %{atom() => TDD.t() | PolyTDD.t()} +# +# @typedoc "Substitution map: maps type variable names (strings) to TDDs" +# @type substitution :: %{String.t() => TDD.t()} +# +# @typedoc "Constraints collected during inference: {type_var_name, constraint}" +# @type collected_constraints :: [{String.t(), Constraint.t()}] +# +# @typedoc """ +# Result of inference for an 
expression: +# - inferred_type: The TDD or PolyTDD type of the expression. +# - var_counter: The updated counter for generating fresh type variables. +# - substitution: The accumulated substitution map. +# - constraints: Constraints that need to be satisfied. +# """ +# @type infer_result :: +# {inferred_type :: TDD.t() | PolyTDD.t(), var_counter :: non_neg_integer(), +# substitution :: substitution(), constraints :: collected_constraints()} +# +# # --- Helper for Fresh Type Variables --- +# defmodule FreshVar do +# @doc "Generates a new type variable name and increments the counter." +# @spec next(non_neg_integer()) :: {String.t(), non_neg_integer()} +# def next(counter) do +# new_var_name = "~t" <> Integer.to_string(counter) +# {new_var_name, counter + 1} +# end +# end +# +# # --- Core Inference Function --- +# +# @doc "Infers the type of a Tilly expression." +# @spec infer( +# expr :: term(), +# env :: type_env(), +# var_counter :: non_neg_integer(), +# sub :: substitution() +# ) :: +# infer_result() +# def infer({:lit, val}, _env, var_counter, sub) do +# type = +# cond do +# # More precise: Type.tdd_literal(val) +# is_atom(val) -> Type.tdd_pred(:is_atom) +# # Type.tdd_literal(val) +# is_integer(val) -> Type.tdd_pred(:is_integer) +# # Type.tdd_literal(val) +# is_float(val) -> Type.tdd_pred(:is_float) +# # Type.tdd_literal(val) +# is_binary(val) -> Type.tdd_pred(:is_binary) +# # Add other literals as needed +# # Fallback for other kinds of literals +# true -> Type.tdd_literal(val) +# end +# +# {type, var_counter, sub, []} +# end +# +# def infer({:var, name}, env, var_counter, sub) when is_atom(name) do +# case Map.get(env, name) do +# nil -> +# raise "Unbound variable: #{name}" +# +# %TDD{} = tdd_type -> +# {Type.tdd_substitute(tdd_type, sub), var_counter, sub, []} +# +# %PolyTDD{} = poly_type -> +# {instantiated_type, new_var_counter, new_constraints} = +# instantiate(poly_type, var_counter) +# +# # Apply current substitution to the instantiated type +# # (in 
case fresh vars from instantiation are already in sub from elsewhere) +# final_type = Type.tdd_substitute(instantiated_type, sub) +# {final_type, new_var_counter, sub, new_constraints} +# end +# end +# +# def infer({:fn, param_atoms, body_expr}, env, var_counter, sub) when is_list(param_atoms) do +# # 1. Create fresh type variables for parameters +# {param_tdd_vars, extended_env, counter_after_params} = +# Enum.reduce(param_atoms, {[], env, var_counter}, fn param_name, +# {vars_acc, env_acc, c_acc} -> +# {fresh_var_name, next_c} = FreshVar.next(c_acc) +# param_tdd_var = Type.tdd_var(fresh_var_name) +# {[param_tdd_var | vars_acc], Map.put(env_acc, param_name, param_tdd_var), next_c} +# end) +# +# param_types = Enum.reverse(param_tdd_vars) +# +# # 2. Infer body with extended environment and current substitution +# {body_type_raw, counter_after_body, sub_after_body, body_constraints} = +# infer(body_expr, extended_env, counter_after_params, sub) +# +# # 3. Apply the substitution from body inference to parameter types +# # This is because unification within the body might refine what the param types can be. +# final_param_types = Enum.map(param_types, &Type.tdd_substitute(&1, sub_after_body)) +# # Already applied in infer usually +# final_body_type = Type.tdd_substitute(body_type_raw, sub_after_body) +# +# # 4. Construct function type +# fun_type = Type.tdd_function_sig(final_param_types, final_body_type) +# {fun_type, counter_after_body, sub_after_body, body_constraints} +# end +# +# def infer({:app, fun_expr, arg_exprs}, env, var_counter, sub) when is_list(arg_exprs) do +# # 1. Infer function expression +# {fun_type_raw, c1, s1, fun_constraints} = infer(fun_expr, env, var_counter, sub) +# # Apply substitutions so far +# fun_type_template = Type.tdd_substitute(fun_type_raw, s1) +# +# # 2. 
Infer argument expressions +# {arg_types_raw, c2, s2, args_constraints_lists} = +# Enum.map_reduce(arg_exprs, {c1, s1}, fn arg_expr, {c_acc, s_acc} -> +# {arg_t, next_c, next_s, arg_c} = infer(arg_expr, env, c_acc, s_acc) +# # Pass along type and its constraints +# {{arg_t, arg_c}, {next_c, next_s}} +# end) +# +# actual_arg_types = Enum.map(arg_types_raw, fn {t, _cs} -> Type.tdd_substitute(t, s2) end) +# all_arg_constraints = Enum.flat_map(arg_types_raw, fn {_t, cs} -> cs end) ++ fun_constraints +# +# # 3. Unify/Match function type with arguments +# # `fun_type_template` is the type of the function (e.g., {:var, "~f"} or an actual fn_sig) +# # `s2` is the current global substitution. +# {return_type_final, c3, s3, unification_constraints} = +# unify_apply(fun_type_template, actual_arg_types, c2, s2) +# +# {return_type_final, c3, s3, all_arg_constraints ++ unification_constraints} +# end +# +# def infer({:let, [{var_name, val_expr}], body_expr}, env, var_counter, sub) do +# # 1. Infer the type of the value expression +# {val_type_raw, c1, s1, val_constraints} = infer(val_expr, env, var_counter, sub) +# +# # 2. Apply current substitution and generalize the value's type +# # Generalization happens *before* adding to env, over variables free in val_type but not env. +# # The substitution `s1` contains all refinements up to this point. +# val_type_substituted = Type.tdd_substitute(val_type_raw, s1) +# generalized_val_type = generalize(val_type_substituted, env, s1) +# +# # 3. Extend environment and infer body +# extended_env = Map.put(env, var_name, generalized_val_type) +# # Use s1 for body too +# {body_type_raw, c2, s2, body_constraints} = infer(body_expr, extended_env, c1, s1) +# +# # The final substitution s2 incorporates s1 and any changes from body. +# # The final body_type is already substituted by s2. 
+# {body_type_raw, c2, s2, val_constraints ++ body_constraints} +# end +# +# # --- Polymorphism: Instantiation and Generalization --- +# +# @doc "Instantiates a polymorphic type scheme by replacing quantified variables with fresh ones." +# def instantiate(%PolyTDD{vars: poly_vars_list, body: body_tdd}, var_counter) do +# # Create substitution map from quantified vars to fresh vars +# {substitution_to_fresh, new_var_counter, new_constraints} = +# Enum.reduce(poly_vars_list, {%{}, var_counter, []}, fn %Var{ +# name: q_name, +# constraints: q_constraints +# }, +# {sub_acc, c_acc, cons_acc} -> +# {fresh_name, next_c} = FreshVar.next(c_acc) +# fresh_tdd_var = Type.tdd_var(fresh_name) +# # Associate constraints of the quantified var with the new fresh var +# # Tie constraint to fresh var name +# fresh_var_constraints = Enum.map(q_constraints, &%Constraint{&1 | arg: fresh_name}) +# {Map.put(sub_acc, q_name, fresh_tdd_var), next_c, cons_acc ++ fresh_var_constraints} +# end) +# +# instantiated_body = Type.tdd_substitute(body_tdd, substitution_to_fresh) +# {instantiated_body, new_var_counter, new_constraints} +# end +# +# @doc "Generalizes a TDD type into a PolyTDD if it has free variables not in the environment." 
+# def generalize(type_tdd, env, current_sub) do +# # Apply current substitution to resolve any vars in type_tdd that are already determined +# type_to_generalize = Type.tdd_substitute(type_tdd, current_sub) +# +# env_free_vars = +# env +# |> Map.values() +# |> Enum.map(&apply_sub_and_get_free_vars(&1, current_sub)) +# |> Enum.reduce(MapSet.new(), &MapSet.union/2) +# +# type_free_vars_set = Type.free_vars(type_to_generalize) +# +# vars_to_quantify_names = MapSet.difference(type_free_vars_set, env_free_vars) +# +# if MapSet.size(vars_to_quantify_names) == 0 do +# # No variables to quantify, return as is +# type_to_generalize +# else +# quantified_vars_structs = +# Enum.map(MapSet.to_list(vars_to_quantify_names), fn var_name -> +# # For now, generalized variables have no attached constraints here. +# # Constraints arise from usage and are checked later. +# %Var{name: var_name, constraints: []} +# end) +# +# %PolyTDD{vars: quantified_vars_structs, body: type_to_generalize} +# end +# end +# +# defp apply_sub_and_get_free_vars(%TDD{} = tdd, sub) do +# Type.tdd_substitute(tdd, sub) |> Type.free_vars() +# end +# +# defp apply_sub_and_get_free_vars(%PolyTDD{} = poly_tdd, sub) do +# # For a PolyTDD in the env, we care about its free variables *after* substitution, +# # excluding its own quantified variables. +# # Substitutes free vars in body +# Type.poly_substitute_free_vars(poly_tdd, sub) +# |> Type.free_vars_in_poly_tdd_body() +# end +# +# # --- Unification (Simplified for now) --- +# +# @doc """ +# Constrains variables in t1 and t2 to be compatible and updates the substitution. +# If t1 is Var(~a) and t2 is Type T, then ~a's bound becomes current_bound(~a) & T. +# If t1 and t2 are concrete, checks their intersection isn't None. +# Returns new substitution. Throws on error. 
+# """ +# def constrain_and_update_sub(raw_t1, raw_t2, sub) do +# # IO.inspect({:constrain_start, raw_t1, raw_t2, sub}, label: "CONSTRAIN") +# t1 = tdd_substitute(raw_t1, sub) +# t2 = tdd_substitute(raw_t2, sub) +# # IO.inspect({:constrain_applied, t1, t2}, label: "CONSTRAIN") +# +# cond do +# # Identical or one is Any (Any & T = T, so effectively no new constraint on T if T is a var already refined from Any) +# t1 == t2 -> +# sub +# +# # Effectively constrains t2 if it's a var +# t1 == Type.tdd_any() -> +# constrain_var_with_type(t2, t1, sub) +# +# # Effectively constrains t1 if it's a var +# t2 == Type.tdd_any() -> +# constrain_var_with_type(t1, t2, sub) +# +# # Case 1: t1 is a variable +# %TDD{decision: {:var, v_name1}} = t1 -> +# update_var_bound(v_name1, t2, sub, raw_t1, raw_t2) +# +# # Case 2: t2 is a variable (and t1 is not) +# %TDD{decision: {:var, v_name2}} = t2 -> +# # Note order for error message +# update_var_bound(v_name2, t1, sub, raw_t2, raw_t1) +# +# # Case 3: Both are function signatures (concrete) +# %TDD{decision: {:is_function_sig, params1, ret1}} = t1, +# %TDD{decision: {:is_function_sig, params2, ret2}} = t2 -> +# if length(params1) != length(params2) do +# raise "Type error (constrain): Function arity mismatch between #{inspect(t1)} and #{inspect(t2)}" +# end +# +# # For two function *types* to be compatible/substitutable, their parameters are contravariant, return is covariant. +# # However, if we are "unifying" them to be *the same type structure*, then params are covariant. +# # Let's assume for now `constrain_and_update_sub` implies they should be "equal or compatible via intersection". +# # This means their intersection should be non-None, and vars within them get constrained. 
+# +# sub_after_params = +# Enum.zip(params1, params2) +# |> Enum.reduce(sub, fn {p1, p2}, acc_sub -> +# # Params are "unified" directly +# constrain_and_update_sub(p1, p2, acc_sub) +# end) +# +# # Return types are "unified" directly +# constrain_and_update_sub(ret1, ret2, sub_after_params) +# +# # TODO: Add cases for Tuples, Lists, TDDMap +# # For tuples: length must match, then constrain_and_update_sub elements pairwise. +# # %TDD{decision: {:is_tuple, len1}, yes: elements_tdd1} ... +# # This requires TDDs to encode tuple elements more directly if we want to unify structurally. +# # Current TDD for tuple is just {:tuple_len, N} or general :is_tuple. We need richer TDDs for structural unification. +# # For now, this fallback will handle simple tuple predicates. +# +# # Case 4: Other concrete types. +# true -> +# intersection = tdd_and(t1, t2) +# +# if intersection == Type.tdd_none() do +# raise "Type error (constrain): Types #{inspect(t1)} (from #{inspect(raw_t1)}) and #{inspect(t2)} (from #{inspect(raw_t2)}) are incompatible (intersection is empty). Current sub: #{inspect(sub)}" +# end +# +# # If they are concrete and compatible, `sub` is unchanged at this level. 
+# sub +# end +# +# defp constrain_var_with_type(%TDD{decision: {:var, v_name}} = var_tdd, other_type, sub) do +# # raw_t1, raw_t2 are for error msg context +# update_var_bound(v_name, other_type, sub, var_tdd, other_type) +# end +# +# # No var, no sub change here +# defp constrain_var_with_type(_concrete_type, _other_type, sub), do: sub +# +# defp update_var_bound(v_name, constraining_type, sub, raw_var_form, raw_constraining_form) do +# # Default to Any +# current_bound_v = Map.get(sub, v_name, Type.tdd_any()) +# new_bound_v = Type.tdd_and(current_bound_v, constraining_type) +# +# if new_bound_v == Type.tdd_none() do +# original_var_constraint_str = +# if raw_var_form != constraining_type, +# do: "(from unifying with #{inspect(raw_constraining_form)})", +# else: "" +# +# raise "Type error: Constraining variable #{v_name} with #{inspect(constraining_type)} #{original_var_constraint_str} results in an empty type. Previous bound: #{inspect(current_bound_v)}. Current sub: #{inspect(sub)}" +# end +# +# Map.put(sub, v_name, new_bound_v) +# end +# +# @doc """ +# Handles the application of a function type to actual argument types. +# `fun_type_template` is the (possibly variable) type of the function. +# `actual_arg_types` are the TDDs of the arguments. +# `var_counter` and `sub` are current state. +# Returns `{final_return_type, new_counter, new_sub, new_constraints}`. +# """ +# def unify_apply(fun_type_template, actual_arg_types, var_counter, sub) do +# # Apply current substitutions to fun_type_template +# current_fun_type = Type.tdd_substitute(fun_type_template, sub) +# +# case current_fun_type do +# %TDD{decision: {:var, fun_var_name}} -> +# # Function is a type variable. We need to unify it with a newly minted function signature. 
+# {param_var_tds, c1} = +# Enum.map_reduce(actual_arg_types, var_counter, fn _arg, c_acc -> +# {fresh_name, next_c} = FreshVar.next(c_acc) +# {Type.tdd_var(fresh_name), next_c} +# end) +# +# {return_var_name, c2} = FreshVar.next(c1) +# return_var_tdd = Type.tdd_var(return_var_name) +# +# # The new signature that fun_var_name must conform to +# synthetic_fun_sig_tdd = Type.tdd_function_sig(param_var_tds, return_var_tdd) +# +# # Unify the function variable with this synthetic signature +# {s1, cons1} = unify(current_fun_type, synthetic_fun_sig_tdd, sub) +# +# # Now unify actual arguments with the fresh parameter type variables +# {s2, cons2_list} = +# Enum.zip(actual_arg_types, param_var_tds) +# |> Enum.reduce({s1, []}, fn {actual_arg_t, param_var_t}, {s_acc, c_acc_list} -> +# {next_s, next_cs} = unify(actual_arg_t, param_var_t, s_acc) +# {next_s, [next_cs | c_acc_list]} +# end) +# +# final_return_type = Type.tdd_substitute(return_var_tdd, s2) +# {final_return_type, c2, s2, cons1 ++ List.flatten(cons2_list)} +# +# %TDD{decision: {:is_function_sig, expected_param_types, expected_return_type}} -> +# # Function is a known signature. 
+# if length(actual_arg_types) != length(expected_param_types) do +# raise "Arity mismatch: expected #{length(expected_param_types)}, got #{length(actual_arg_types)}" +# end +# +# # Unify actual arguments with expected parameter types +# {s1, constraints_from_params_list} = +# Enum.zip(actual_arg_types, expected_param_types) +# |> Enum.reduce({sub, []}, fn {actual_arg_t, expected_param_t}, {s_acc, c_acc_list} -> +# {next_s, param_cs} = unify(actual_arg_t, expected_param_t, s_acc) +# {next_s, [param_cs | c_acc_list]} +# end) +# +# final_return_type = Type.tdd_substitute(expected_return_type, s1) +# {final_return_type, var_counter, s1, List.flatten(constraints_from_params_list)} +# +# other_type -> +# raise "Type error: expected a function, but got #{inspect(other_type)}" +# end +# end +# +# @doc "Top-level type checking function for a Tilly program (list of expressions)." +# def typecheck_program(exprs, initial_env \\ %{}) do +# # For a program, we can infer each top-level expression. +# # For `def`s, they would add to the environment. +# # For now, let's just infer a single expression. +# # A real program would involve modules, defs, etc. +# initial_var_counter = 0 +# initial_substitution = %{} +# +# # This is a simplified entry point, inferring a single expression +# # A full program checker would iterate, manage top-level defs, etc. 
+# if is_list(exprs) and Enum.count(exprs) == 1 do +# [main_expr] = exprs +# +# {raw_type, _counter, final_sub, constraints} = +# infer(main_expr, initial_env, initial_var_counter, initial_substitution) +# +# final_type = Type.tdd_substitute(raw_type, final_sub) +# # Here, you would solve/check `constraints` using `final_sub` +# # For example: +# Enum.each(constraints, fn {var_name, constraint_obj} -> +# var_final_type = Map.get(final_sub, var_name, Type.tdd_var(var_name)) +# +# unless Type.satisfies_constraint?(var_final_type, constraint_obj) do +# raise "Constraint #{inspect(constraint_obj)} not satisfied for #{var_name} (type #{inspect(var_final_type)})" +# end +# end) +# +# {:ok, final_type, final_sub} +# else +# # Placeholder for multi-expression program handling +# {:error, "Program must be a single expression for now"} +# end +# end +# end +# end diff --git a/lib/tilly/bdd.ex b/lib/tilly/bdd.ex deleted file mode 100644 index 1d24fd3..0000000 --- a/lib/tilly/bdd.ex +++ /dev/null @@ -1,146 +0,0 @@ -defmodule Tilly.BDD do - @moduledoc """ - Manages the BDD store, including hash-consing of BDD nodes. - The BDD store is expected to be part of a `typing_ctx` map under the key `:bdd_store`. - """ - - alias Tilly.BDD.Node - - @false_node_id 0 - @true_node_id 1 - @initial_next_node_id 2 - @universal_ops_module :universal_ops - - @doc """ - Initializes the BDD store within the typing context. - Pre-interns canonical `false` and `true` BDD nodes. 
- """ - def init_bdd_store(typing_ctx) when is_map(typing_ctx) do - false_structure = Node.mk_false() - true_structure = Node.mk_true() - - bdd_store = %{ - nodes_by_structure: %{ - {false_structure, @universal_ops_module} => @false_node_id, - {true_structure, @universal_ops_module} => @true_node_id - }, - structures_by_id: %{ - @false_node_id => %{structure: false_structure, ops_module: @universal_ops_module}, - @true_node_id => %{structure: true_structure, ops_module: @universal_ops_module} - }, - next_node_id: @initial_next_node_id, - ops_cache: %{} # Cache for BDD operations {op_key, id1, id2} -> result_id - } - - Map.put(typing_ctx, :bdd_store, bdd_store) - end - - @doc """ - Gets an existing BDD node ID or interns a new one if it's not already in the store. - - Returns a tuple `{new_typing_ctx, node_id}`. - The `typing_ctx` is updated if a new node is interned. - """ - def get_or_intern_node(typing_ctx, logical_structure, ops_module_atom) do - bdd_store = Map.get(typing_ctx, :bdd_store) - - unless bdd_store do - raise ArgumentError, "BDD store not initialized in typing_ctx. Call init_bdd_store first." 
- end - - key = {logical_structure, ops_module_atom} - - case Map.get(bdd_store.nodes_by_structure, key) do - nil -> - # Node not found, intern it - node_id = bdd_store.next_node_id - - new_nodes_by_structure = Map.put(bdd_store.nodes_by_structure, key, node_id) - - node_data = %{structure: logical_structure, ops_module: ops_module_atom} - new_structures_by_id = Map.put(bdd_store.structures_by_id, node_id, node_data) - - new_next_node_id = node_id + 1 - - new_bdd_store = - %{ - bdd_store - | nodes_by_structure: new_nodes_by_structure, - structures_by_id: new_structures_by_id, - next_node_id: new_next_node_id - } - - new_typing_ctx = Map.put(typing_ctx, :bdd_store, new_bdd_store) - {new_typing_ctx, node_id} - - existing_node_id -> - # Node found - {typing_ctx, existing_node_id} - end - end - - @doc """ - Retrieves the node's structure and ops_module from the BDD store. - Returns `%{structure: logical_structure_tuple, ops_module: ops_module_atom}` or `nil` if not found. - """ - def get_node_data(typing_ctx, node_id) do - with %{bdd_store: %{structures_by_id: structures_by_id}} <- typing_ctx, - data when not is_nil(data) <- Map.get(structures_by_id, node_id) do - data - else - _ -> nil - end - end - - @doc """ - Checks if the given node ID corresponds to the canonical `false` BDD node. - """ - def is_false_node?(typing_ctx, node_id) do - # Optimized check for the predefined ID - if node_id == @false_node_id do - true - else - # Fallback for cases where a node might be structurally false but not have the canonical ID. - # This should ideally not happen with proper interning of Node.mk_false() via get_or_intern_node. - case get_node_data(typing_ctx, node_id) do - %{structure: structure, ops_module: @universal_ops_module} -> - structure == Node.mk_false() - _ -> - false - end - end - end - - @doc """ - Checks if the given node ID corresponds to the canonical `true` BDD node. 
- """ - def is_true_node?(typing_ctx, node_id) do - # Optimized check for the predefined ID - if node_id == @true_node_id do - true - else - # Fallback for cases where a node might be structurally true but not have the canonical ID. - case get_node_data(typing_ctx, node_id) do - %{structure: structure, ops_module: @universal_ops_module} -> - structure == Node.mk_true() - _ -> - false - end - end - end - - @doc """ - Returns the canonical ID for the `false` BDD node. - """ - def false_node_id(), do: @false_node_id - - @doc """ - Returns the canonical ID for the `true` BDD node. - """ - def true_node_id(), do: @true_node_id - - @doc """ - Returns the atom used as the `ops_module` for universal nodes like `true` and `false`. - """ - def universal_ops_module(), do: @universal_ops_module -end diff --git a/lib/tilly/bdd/atom_bool_ops.ex b/lib/tilly/bdd/atom_bool_ops.ex deleted file mode 100644 index 1a82dbd..0000000 --- a/lib/tilly/bdd/atom_bool_ops.ex +++ /dev/null @@ -1,89 +0,0 @@ -defmodule Tilly.BDD.AtomBoolOps do - @moduledoc """ - BDD operations module for sets of atoms. - Elements are atoms, and leaf values are booleans. - """ - - @doc """ - Compares two atoms. - Returns `:lt`, `:eq`, or `:gt`. - """ - def compare_elements(elem1, elem2) when is_atom(elem1) and is_atom(elem2) do - cond do - elem1 < elem2 -> :lt - elem1 > elem2 -> :gt - true -> :eq - end - end - - @doc """ - Checks if two atoms are equal. - """ - def equal_element?(elem1, elem2) when is_atom(elem1) and is_atom(elem2) do - elem1 == elem2 - end - - @doc """ - Hashes an atom. - """ - def hash_element(elem) when is_atom(elem) do - # erlang.phash2 is suitable for term hashing - :erlang.phash2(elem) - end - - @doc """ - The leaf value representing an empty set of atoms (false). - """ - def empty_leaf(), do: false - - @doc """ - The leaf value representing the universal set of atoms (true). - This is used if a BDD simplifies to a state where all atoms of this kind are included. 
- """ - def any_leaf(), do: true - - @doc """ - Checks if a leaf value represents an empty set. - """ - def is_empty_leaf?(leaf_val) when is_boolean(leaf_val) do - leaf_val == false - end - - @doc """ - Computes the union of two leaf values. - `typing_ctx` is included for interface consistency, but not used for boolean leaves. - """ - def union_leaves(_typing_ctx, leaf1, leaf2) when is_boolean(leaf1) and is_boolean(leaf2) do - leaf1 or leaf2 - end - - @doc """ - Computes the intersection of two leaf values. - `typing_ctx` is included for interface consistency, but not used for boolean leaves. - """ - def intersection_leaves(_typing_ctx, leaf1, leaf2) - when is_boolean(leaf1) and is_boolean(leaf2) do - leaf1 and leaf2 - end - - @doc """ - Computes the negation of a leaf value. - `typing_ctx` is included for interface consistency, but not used for boolean leaves. - """ - def negation_leaf(_typing_ctx, leaf) when is_boolean(leaf) do - not leaf - end - - # def difference_leaves(_typing_ctx, leaf1, leaf2) when is_boolean(leaf1) and is_boolean(leaf2) do - # leaf1 and (not leaf2) - # end - - @doc """ - Tests a leaf value to determine if it represents an empty, full, or other set. - Returns `:empty`, `:full`, or `:other`. - """ - def test_leaf_value(true), do: :full - def test_leaf_value(false), do: :empty - # Add a clause for other types if atoms could have non-boolean leaf values - # def test_leaf_value(_other), do: :other -end diff --git a/lib/tilly/bdd/integer_bool_ops.ex b/lib/tilly/bdd/integer_bool_ops.ex deleted file mode 100644 index dc95c0e..0000000 --- a/lib/tilly/bdd/integer_bool_ops.ex +++ /dev/null @@ -1,87 +0,0 @@ -defmodule Tilly.BDD.IntegerBoolOps do - @moduledoc """ - BDD Operations module for BDDs where elements are integers and leaves are booleans. - """ - - @doc """ - Compares two integer elements. - Returns `:lt`, `:eq`, or `:gt`. 
- """ - def compare_elements(elem1, elem2) when is_integer(elem1) and is_integer(elem2) do - cond do - elem1 < elem2 -> :lt - elem1 > elem2 -> :gt - true -> :eq - end - end - - @doc """ - Checks if two integer elements are equal. - """ - def equal_element?(elem1, elem2) when is_integer(elem1) and is_integer(elem2) do - elem1 == elem2 - end - - @doc """ - Hashes an integer element. - """ - def hash_element(elem) when is_integer(elem) do - elem - end - - @doc """ - Returns the leaf value representing emptiness (false). - """ - def empty_leaf(), do: false - - @doc """ - Returns the leaf value representing universality (true). - """ - def any_leaf(), do: true - - @doc """ - Checks if the leaf value represents emptiness. - """ - def is_empty_leaf?(leaf_val) when is_boolean(leaf_val) do - leaf_val == false - end - - @doc """ - Computes the union of two boolean leaf values. - The `_typing_ctx` is ignored for this simple ops module. - """ - def union_leaves(_typing_ctx, leaf1, leaf2) when is_boolean(leaf1) and is_boolean(leaf2) do - leaf1 or leaf2 - end - - @doc """ - Computes the intersection of two boolean leaf values. - The `_typing_ctx` is ignored for this simple ops module. - """ - def intersection_leaves(_typing_ctx, leaf1, leaf2) - when is_boolean(leaf1) and is_boolean(leaf2) do - leaf1 and leaf2 - end - - @doc """ - Computes the negation of a boolean leaf value. - The `_typing_ctx` is ignored for this simple ops module. - """ - def negation_leaf(_typing_ctx, leaf) when is_boolean(leaf) do - not leaf - end - - # def difference_leaves(_typing_ctx, leaf1, leaf2) when is_boolean(leaf1) and is_boolean(leaf2) do - # leaf1 and (not leaf2) - # end - - @doc """ - Tests a leaf value to determine if it represents an empty, full, or other set. - For boolean leaves with integers, this mirrors AtomBoolOps and StringBoolOps. - Returns `:empty`, `:full`, or `:other`. 
- """ - def test_leaf_value(true), do: :full - def test_leaf_value(false), do: :empty - # If integer BDDs could have non-boolean leaves that are not empty/full: - # def test_leaf_value(_other_leaf_value), do: :other -end diff --git a/lib/tilly/bdd/node.ex b/lib/tilly/bdd/node.ex deleted file mode 100644 index 24e9d03..0000000 --- a/lib/tilly/bdd/node.ex +++ /dev/null @@ -1,124 +0,0 @@ -defmodule Tilly.BDD.Node do - @moduledoc """ - Defines the structure of BDD nodes and provides basic helper functions. - - BDD nodes can be one of the following Elixir terms: - - `true`: Represents the universal set BDD. - - `false`: Represents the empty set BDD. - - `{:leaf, leaf_value_id}`: Represents a leaf node. - `leaf_value_id`'s interpretation depends on the specific BDD's `ops_module`. - - `{:split, element_id, positive_child_id, ignore_child_id, negative_child_id}`: - Represents an internal decision node. - `element_id` is the value being split upon. - `positive_child_id`, `ignore_child_id`, `negative_child_id` are IDs of other BDD nodes. - """ - - @typedoc "A BDD node representing the universal set." - @type true_node :: true - - @typedoc "A BDD node representing the empty set." - @type false_node :: false - - @typedoc "A BDD leaf node." - @type leaf_node(leaf_value) :: {:leaf, leaf_value} - - @typedoc "A BDD split node." - @type split_node(element, node_id) :: - {:split, element, node_id, node_id, node_id} - - @typedoc "Any valid BDD node structure." - @type t(element, leaf_value, node_id) :: - true_node() - | false_node() - | leaf_node(leaf_value) - | split_node(element, node_id) - - # --- Smart Constructors (Low-Level) --- - - @doc "Creates a true BDD node." - @spec mk_true() :: true_node() - def mk_true, do: true - - @doc "Creates a false BDD node." - @spec mk_false() :: false_node() - def mk_false, do: false - - @doc "Creates a leaf BDD node." 
- @spec mk_leaf(leaf_value :: any()) :: leaf_node(any()) - def mk_leaf(leaf_value_id), do: {:leaf, leaf_value_id} - - @doc "Creates a split BDD node." - @spec mk_split( - element_id :: any(), - positive_child_id :: any(), - ignore_child_id :: any(), - negative_child_id :: any() - ) :: split_node(any(), any()) - def mk_split(element_id, positive_child_id, ignore_child_id, negative_child_id) do - {:split, element_id, positive_child_id, ignore_child_id, negative_child_id} - end - - # --- Predicates --- - - @doc "Checks if the node is a true node." - @spec is_true?(node :: t(any(), any(), any())) :: boolean() - def is_true?(true), do: true - def is_true?(_other), do: false - - @doc "Checks if the node is a false node." - @spec is_false?(node :: t(any(), any(), any())) :: boolean() - def is_false?(false), do: true - def is_false?(_other), do: false - - @doc "Checks if the node is a leaf node." - @spec is_leaf?(node :: t(any(), any(), any())) :: boolean() - def is_leaf?({:leaf, _value}), do: true - def is_leaf?(_other), do: false - - @doc "Checks if the node is a split node." - @spec is_split?(node :: t(any(), any(), any())) :: boolean() - def is_split?({:split, _el, _p, _i, _n}), do: true - def is_split?(_other), do: false - - # --- Accessors --- - - @doc """ - Returns the value of a leaf node. - Raises an error if the node is not a leaf node. - """ - @spec value(leaf_node :: leaf_node(any())) :: any() - def value({:leaf, value_id}), do: value_id - def value(other), do: raise(ArgumentError, "Not a leaf node: #{inspect(other)}") - - @doc """ - Returns the element of a split node. - Raises an error if the node is not a split node. - """ - @spec element(split_node :: split_node(any(), any())) :: any() - def element({:split, element_id, _, _, _}), do: element_id - def element(other), do: raise(ArgumentError, "Not a split node: #{inspect(other)}") - - @doc """ - Returns the positive child ID of a split node. - Raises an error if the node is not a split node. 
- """ - @spec positive_child(split_node :: split_node(any(), any())) :: any() - def positive_child({:split, _, p_child_id, _, _}), do: p_child_id - def positive_child(other), do: raise(ArgumentError, "Not a split node: #{inspect(other)}") - - @doc """ - Returns the ignore child ID of a split node. - Raises an error if the node is not a split node. - """ - @spec ignore_child(split_node :: split_node(any(), any())) :: any() - def ignore_child({:split, _, _, i_child_id, _}), do: i_child_id - def ignore_child(other), do: raise(ArgumentError, "Not a split node: #{inspect(other)}") - - @doc """ - Returns the negative child ID of a split node. - Raises an error if the node is not a split node. - """ - @spec negative_child(split_node :: split_node(any(), any())) :: any() - def negative_child({:split, _, _, _, n_child_id}), do: n_child_id - def negative_child(other), do: raise(ArgumentError, "Not a split node: #{inspect(other)}") -end diff --git a/lib/tilly/bdd/ops.ex b/lib/tilly/bdd/ops.ex deleted file mode 100644 index b280ec8..0000000 --- a/lib/tilly/bdd/ops.ex +++ /dev/null @@ -1,347 +0,0 @@ -defmodule Tilly.BDD.Ops do - @moduledoc """ - Generic BDD algorithms and smart constructors. - These functions operate on BDD node IDs and use an `ops_module` - to dispatch to specific element/leaf operations. - """ - - alias Tilly.BDD - alias Tilly.BDD.Node - - @doc """ - Smart constructor for leaf nodes. - Uses the `ops_module` to test if the `leaf_value` corresponds to - an empty or universal set for that module. - Returns `{new_typing_ctx, node_id}`. - """ - def leaf(typing_ctx, leaf_value, ops_module) do - case apply(ops_module, :test_leaf_value, [leaf_value]) do - :empty -> - {typing_ctx, BDD.false_node_id()} - - :full -> - {typing_ctx, BDD.true_node_id()} - - :other -> - logical_structure = Node.mk_leaf(leaf_value) - BDD.get_or_intern_node(typing_ctx, logical_structure, ops_module) - end - end - - @doc """ - Smart constructor for split nodes. Applies simplification rules. 
- Returns `{new_typing_ctx, node_id}`. - """ - def split(typing_ctx, element, p_id, i_id, n_id, ops_module) do - # Apply simplification rules. Order can be important. - cond do - # If ignore and negative children are False, result is positive child. - BDD.is_false_node?(typing_ctx, i_id) and - BDD.is_false_node?(typing_ctx, n_id) -> - {typing_ctx, p_id} - - # If ignore child is True, the whole BDD is True. - BDD.is_true_node?(typing_ctx, i_id) -> - {typing_ctx, BDD.true_node_id()} - - # If positive and negative children are the same. - p_id == n_id -> - if p_id == i_id do - # All three children are identical. - {typing_ctx, p_id} - else - # Result is p_id (or n_id) unioned with i_id. - # This creates a potential mutual recursion with union_bdds - # which needs to be handled by the apply_op cache. - union_bdds(typing_ctx, p_id, i_id) - end - - # TODO: Add more simplification rules from CDuce bdd.ml `split` as needed. - # e.g. if p=T, i=F, n=T -> True - # e.g. if p=F, i=F, n=T -> not(x) relative to this BDD's element universe (complex) - - true -> - # No further simplification rule applied, intern the node. - logical_structure = Node.mk_split(element, p_id, i_id, n_id) - BDD.get_or_intern_node(typing_ctx, logical_structure, ops_module) - end - end - - @doc """ - Computes the union of two BDDs. - Returns `{new_typing_ctx, result_node_id}`. - """ - def union_bdds(typing_ctx, bdd1_id, bdd2_id) do - apply_op(typing_ctx, :union, bdd1_id, bdd2_id) - end - - @doc """ - Computes the intersection of two BDDs. - Returns `{new_typing_ctx, result_node_id}`. - """ - def intersection_bdds(typing_ctx, bdd1_id, bdd2_id) do - apply_op(typing_ctx, :intersection, bdd1_id, bdd2_id) - end - - @doc """ - Computes the negation of a BDD. - Returns `{new_typing_ctx, result_node_id}`. - """ - def negation_bdd(typing_ctx, bdd_id) do - # The second argument to apply_op is nil for unary operations like negation. 
- apply_op(typing_ctx, :negation, bdd_id, nil) - end - - @doc """ - Computes the difference of two BDDs (bdd1 - bdd2). - Returns `{new_typing_ctx, result_node_id}`. - Implemented as `bdd1 INTERSECTION (NEGATION bdd2)`. - """ - def difference_bdd(typing_ctx, bdd1_id, bdd2_id) do - {ctx, neg_bdd2_id} = negation_bdd(typing_ctx, bdd2_id) - intersection_bdds(ctx, bdd1_id, neg_bdd2_id) - end - - # Internal function to handle actual BDD operations, bypassing cache for direct calls. - defp do_union_bdds(typing_ctx, bdd1_id, bdd2_id) do - # Ensure canonical order for commutative operations if not handled by apply_op key - # For simplicity, apply_op will handle canonical key generation. - - # 1. Handle terminal cases - cond do - bdd1_id == bdd2_id -> {typing_ctx, bdd1_id} - BDD.is_true_node?(typing_ctx, bdd1_id) -> {typing_ctx, BDD.true_node_id()} - BDD.is_true_node?(typing_ctx, bdd2_id) -> {typing_ctx, BDD.true_node_id()} - BDD.is_false_node?(typing_ctx, bdd1_id) -> {typing_ctx, bdd2_id} - BDD.is_false_node?(typing_ctx, bdd2_id) -> {typing_ctx, bdd1_id} - true -> perform_union(typing_ctx, bdd1_id, bdd2_id) - end - end - - defp perform_union(typing_ctx, bdd1_id, bdd2_id) do - %{structure: s1, ops_module: ops_m1} = BDD.get_node_data(typing_ctx, bdd1_id) - %{structure: s2, ops_module: ops_m2} = BDD.get_node_data(typing_ctx, bdd2_id) - - # For now, assume ops_modules must match for simplicity. - # Production systems might need more complex logic or type errors here. 
- if ops_m1 != ops_m2 do - raise ArgumentError, - "Cannot union BDDs with different ops_modules: #{inspect(ops_m1)} and #{inspect(ops_m2)}" - end - - ops_m = ops_m1 - - case {s1, s2} do - # Both are leaves - {{:leaf, v1}, {:leaf, v2}} -> - new_leaf_val = apply(ops_m, :union_leaves, [typing_ctx, v1, v2]) - leaf(typing_ctx, new_leaf_val, ops_m) - - # s1 is split, s2 is leaf - {{:split, x1, p1_id, i1_id, n1_id}, {:leaf, _v2}} -> - # CDuce: split x1 p1 (i1 ++ b) n1 - {ctx, new_i1_id} = union_bdds(typing_ctx, i1_id, bdd2_id) - split(ctx, x1, p1_id, new_i1_id, n1_id, ops_m) - - # s1 is leaf, s2 is split - {{:leaf, _v1}, {:split, x2, p2_id, i2_id, n2_id}} -> - # CDuce: split x2 p2 (i2 ++ a) n2 (symmetric to above) - {ctx, new_i2_id} = union_bdds(typing_ctx, i2_id, bdd1_id) - split(ctx, x2, p2_id, new_i2_id, n2_id, ops_m) - - # Both are splits - {{:split, x1, p1_id, i1_id, n1_id}, {:split, x2, p2_id, i2_id, n2_id}} -> - # Compare elements using the ops_module - comp_result = apply(ops_m, :compare_elements, [x1, x2]) - - cond do - comp_result == :eq -> - # Elements are equal, merge children - {ctx0, new_p_id} = union_bdds(typing_ctx, p1_id, p2_id) - {ctx1, new_i_id} = union_bdds(ctx0, i1_id, i2_id) - {ctx2, new_n_id} = union_bdds(ctx1, n1_id, n2_id) - split(ctx2, x1, new_p_id, new_i_id, new_n_id, ops_m) - - comp_result == :lt -> - # x1 < x2 - # CDuce: split x1 p1 (i1 ++ b) n1 - {ctx, new_i1_id} = union_bdds(typing_ctx, i1_id, bdd2_id) - split(ctx, x1, p1_id, new_i1_id, n1_id, ops_m) - - comp_result == :gt -> - # x1 > x2 - # CDuce: split x2 p2 (i2 ++ a) n2 - {ctx, new_i2_id} = union_bdds(typing_ctx, i2_id, bdd1_id) - split(ctx, x2, p2_id, new_i2_id, n2_id, ops_m) - end - end - end - - defp do_intersection_bdds(typing_ctx, bdd1_id, bdd2_id) do - # Canonical order handled by apply_op key generation. 
- - # Fast path for disjoint singleton BDDs - case {BDD.get_node_data(typing_ctx, bdd1_id), BDD.get_node_data(typing_ctx, bdd2_id)} do - {%{structure: {:split, x1, t, f, f}, ops_module: m}, - %{structure: {:split, x2, t, f, f}, ops_module: m}} - when x1 != x2 -> - {typing_ctx, BDD.false_node_id()} - - _ -> - # 1. Handle terminal cases - cond do - bdd1_id == bdd2_id -> {typing_ctx, bdd1_id} - BDD.is_false_node?(typing_ctx, bdd1_id) -> {typing_ctx, BDD.false_node_id()} - BDD.is_false_node?(typing_ctx, bdd2_id) -> {typing_ctx, BDD.false_node_id()} - BDD.is_true_node?(typing_ctx, bdd1_id) -> {typing_ctx, bdd2_id} - BDD.is_true_node?(typing_ctx, bdd2_id) -> {typing_ctx, bdd1_id} - true -> perform_intersection(typing_ctx, bdd1_id, bdd2_id) - end - end - end - - defp perform_intersection(typing_ctx, bdd1_id, bdd2_id) do - %{structure: s1, ops_module: ops_m1} = BDD.get_node_data(typing_ctx, bdd1_id) - %{structure: s2, ops_module: ops_m2} = BDD.get_node_data(typing_ctx, bdd2_id) - - if ops_m1 != ops_m2 do - raise ArgumentError, - "Cannot intersect BDDs with different ops_modules: #{inspect(ops_m1)} and #{inspect(ops_m2)}" - end - - ops_m = ops_m1 - - case {s1, s2} do - # Both are leaves - {{:leaf, v1}, {:leaf, v2}} -> - new_leaf_val = apply(ops_m, :intersection_leaves, [typing_ctx, v1, v2]) - leaf(typing_ctx, new_leaf_val, ops_m) - - # s1 is split, s2 is leaf - {{:split, x1, p1_id, i1_id, n1_id}, {:leaf, _v2}} -> - {ctx0, new_p1_id} = intersection_bdds(typing_ctx, p1_id, bdd2_id) - {ctx1, new_i1_id} = intersection_bdds(ctx0, i1_id, bdd2_id) - {ctx2, new_n1_id} = intersection_bdds(ctx1, n1_id, bdd2_id) - split(ctx2, x1, new_p1_id, new_i1_id, new_n1_id, ops_m) - - # s1 is leaf, s2 is split - {{:leaf, _v1}, {:split, x2, p2_id, i2_id, n2_id}} -> - {ctx0, new_p2_id} = intersection_bdds(typing_ctx, bdd1_id, p2_id) - {ctx1, new_i2_id} = intersection_bdds(ctx0, bdd1_id, i2_id) - {ctx2, new_n2_id} = intersection_bdds(ctx1, bdd1_id, n2_id) - split(ctx2, x2, new_p2_id, new_i2_id, 
new_n2_id, ops_m) - - # Both are splits - {{:split, x1, p1_id, i1_id, n1_id}, {:split, x2, p2_id, i2_id, n2_id}} -> - comp_result = apply(ops_m, :compare_elements, [x1, x2]) - - cond do - comp_result == :eq -> - # CDuce: split x1 ((p1**(p2++i2))++(p2**i1)) (i1**i2) ((n1**(n2++i2))++(n2**i1)) - {ctx0, p2_u_i2} = union_bdds(typing_ctx, p2_id, i2_id) - {ctx1, n2_u_i2} = union_bdds(ctx0, n2_id, i2_id) - - {ctx2, p1_i_p2ui2} = intersection_bdds(ctx1, p1_id, p2_u_i2) - {ctx3, p2_i_i1} = intersection_bdds(ctx2, p2_id, i1_id) - {ctx4, new_p_id} = union_bdds(ctx3, p1_i_p2ui2, p2_i_i1) - - {ctx5, new_i_id} = intersection_bdds(ctx4, i1_id, i2_id) - - {ctx6, n1_i_n2ui2} = intersection_bdds(ctx5, n1_id, n2_u_i2) - {ctx7, n2_i_i1} = intersection_bdds(ctx6, n2_id, i1_id) - {ctx8, new_n_id} = union_bdds(ctx7, n1_i_n2ui2, n2_i_i1) - - split(ctx8, x1, new_p_id, new_i_id, new_n_id, ops_m) - - # x1 < x2 - comp_result == :lt -> - # CDuce: split x1 (p1 ** b) (i1 ** b) (n1 ** b) where b is bdd2 - {ctx0, new_p1_id} = intersection_bdds(typing_ctx, p1_id, bdd2_id) - {ctx1, new_i1_id} = intersection_bdds(ctx0, i1_id, bdd2_id) - {ctx2, new_n1_id} = intersection_bdds(ctx1, n1_id, bdd2_id) - split(ctx2, x1, new_p1_id, new_i1_id, new_n1_id, ops_m) - - # x1 > x2 - comp_result == :gt -> - # CDuce: split x2 (a ** p2) (a ** i2) (a ** n2) where a is bdd1 - {ctx0, new_p2_id} = intersection_bdds(typing_ctx, bdd1_id, p2_id) - {ctx1, new_i2_id} = intersection_bdds(ctx0, bdd1_id, i2_id) - {ctx2, new_n2_id} = intersection_bdds(ctx1, bdd1_id, n2_id) - split(ctx2, x2, new_p2_id, new_i2_id, new_n2_id, ops_m) - end - end - end - - defp do_negation_bdd(typing_ctx, bdd_id) do - # 1. 
Handle terminal cases - cond do - BDD.is_true_node?(typing_ctx, bdd_id) -> {typing_ctx, BDD.false_node_id()} - BDD.is_false_node?(typing_ctx, bdd_id) -> {typing_ctx, BDD.true_node_id()} - true -> perform_negation(typing_ctx, bdd_id) - end - end - - defp perform_negation(typing_ctx, bdd_id) do - %{structure: s, ops_module: ops_m} = BDD.get_node_data(typing_ctx, bdd_id) - - case s do - # Leaf - {:leaf, v} -> - neg_leaf_val = apply(ops_m, :negation_leaf, [typing_ctx, v]) - leaf(typing_ctx, neg_leaf_val, ops_m) - - # Split - {:split, x, p_id, i_id, n_id} -> - # CDuce: ~~i ** split x (~~p) (~~(p++n)) (~~n) - {ctx0, neg_i_id} = negation_bdd(typing_ctx, i_id) - {ctx1, neg_p_id} = negation_bdd(ctx0, p_id) - {ctx2, p_u_n_id} = union_bdds(ctx1, p_id, n_id) - {ctx3, neg_p_u_n_id} = negation_bdd(ctx2, p_u_n_id) - {ctx4, neg_n_id} = negation_bdd(ctx3, n_id) - {ctx5, split_part_id} = split(ctx4, x, neg_p_id, neg_p_u_n_id, neg_n_id, ops_m) - intersection_bdds(ctx5, neg_i_id, split_part_id) - end - end - - # --- Caching Wrapper for BDD Operations --- - defp apply_op(typing_ctx, op_key, bdd1_id, bdd2_id) do - cache_key = make_cache_key(op_key, bdd1_id, bdd2_id) - bdd_store = Map.get(typing_ctx, :bdd_store) - - case Map.get(bdd_store.ops_cache, cache_key) do - nil -> - # Not in cache, compute it - {new_typing_ctx, result_id} = - case op_key do - :union -> do_union_bdds(typing_ctx, bdd1_id, bdd2_id) - :intersection -> do_intersection_bdds(typing_ctx, bdd1_id, bdd2_id) - # bdd2_id is nil here - :negation -> do_negation_bdd(typing_ctx, bdd1_id) - _ -> raise "Unsupported op_key: #{op_key}" - end - - # Store in cache - # IMPORTANT: Use new_typing_ctx (from the operation) to get the potentially updated bdd_store - current_bdd_store_after_op = Map.get(new_typing_ctx, :bdd_store) - new_ops_cache = Map.put(current_bdd_store_after_op.ops_cache, cache_key, result_id) - final_bdd_store_with_cache = %{current_bdd_store_after_op | ops_cache: new_ops_cache} - # And put this updated bdd_store back 
into new_typing_ctx - final_typing_ctx_with_cache = - Map.put(new_typing_ctx, :bdd_store, final_bdd_store_with_cache) - - {final_typing_ctx_with_cache, result_id} - - cached_result_id -> - {typing_ctx, cached_result_id} - end - end - - defp make_cache_key(:negation, bdd_id, nil), do: {:negation, bdd_id} - - defp make_cache_key(op_key, id1, id2) when op_key in [:union, :intersection] do - # Canonical order for commutative binary operations - if id1 <= id2, do: {op_key, id1, id2}, else: {op_key, id2, id1} - end - - defp make_cache_key(op_key, id1, id2), do: {op_key, id1, id2} -end diff --git a/lib/tilly/bdd/string_bool_ops.ex b/lib/tilly/bdd/string_bool_ops.ex deleted file mode 100644 index 5b9ac18..0000000 --- a/lib/tilly/bdd/string_bool_ops.ex +++ /dev/null @@ -1,87 +0,0 @@ -defmodule Tilly.BDD.StringBoolOps do - @moduledoc """ - BDD operations module for sets of strings. - Elements are strings, and leaf values are booleans. - """ - - @doc """ - Compares two strings. - Returns `:lt`, `:eq`, or `:gt`. - """ - def compare_elements(elem1, elem2) when is_binary(elem1) and is_binary(elem2) do - cond do - elem1 < elem2 -> :lt - elem1 > elem2 -> :gt - true -> :eq - end - end - - @doc """ - Checks if two strings are equal. - """ - def equal_element?(elem1, elem2) when is_binary(elem1) and is_binary(elem2) do - elem1 == elem2 - end - - @doc """ - Hashes a string. - """ - def hash_element(elem) when is_binary(elem) do - # erlang.phash2 is suitable for term hashing - :erlang.phash2(elem) - end - - @doc """ - The leaf value representing an empty set of strings (false). - """ - def empty_leaf(), do: false - - @doc """ - The leaf value representing the universal set of strings (true). - """ - def any_leaf(), do: true - - @doc """ - Checks if a leaf value represents an empty set. - """ - def is_empty_leaf?(leaf_val) when is_boolean(leaf_val) do - leaf_val == false - end - - @doc """ - Computes the union of two leaf values. 
- `typing_ctx` is included for interface consistency, but not used for boolean leaves. - """ - def union_leaves(_typing_ctx, leaf1, leaf2) when is_boolean(leaf1) and is_boolean(leaf2) do - leaf1 or leaf2 - end - - @doc """ - Computes the intersection of two leaf values. - `typing_ctx` is included for interface consistency, but not used for boolean leaves. - """ - def intersection_leaves(_typing_ctx, leaf1, leaf2) - when is_boolean(leaf1) and is_boolean(leaf2) do - leaf1 and leaf2 - end - - @doc """ - Computes the negation of a leaf value. - `typing_ctx` is included for interface consistency, but not used for boolean leaves. - """ - def negation_leaf(_typing_ctx, leaf) when is_boolean(leaf) do - not leaf - end - - # def difference_leaves(_typing_ctx, leaf1, leaf2) when is_boolean(leaf1) and is_boolean(leaf2) do - # leaf1 and (not leaf2) - # end - - @doc """ - Tests a leaf value to determine if it represents an empty, full, or other set. - Returns `:empty`, `:full`, or `:other`. - """ - def test_leaf_value(true), do: :full - def test_leaf_value(false), do: :empty - # def test_leaf_value(_other), do: :other -end diff --git a/lib/tilly/type.ex b/lib/tilly/type.ex deleted file mode 100644 index dd6cee6..0000000 --- a/lib/tilly/type.ex +++ /dev/null @@ -1,57 +0,0 @@ -defmodule Tilly.Type do - @moduledoc """ - Defines the structure of a Type Descriptor (`Descr`) and provides - helper functions for creating fundamental type descriptors. - - A Type Descriptor is a map representing a type. Each field in the map - corresponds to a basic kind of type component (e.g., atoms, integers, pairs) - and holds a BDD node ID. These BDDs represent the set of values - allowed for that particular component of the type. - """ - - alias Tilly.BDD - - @doc """ - Returns a `Descr` map representing the empty type (Nothing). - All BDD IDs in this `Descr` point to the canonical `false` BDD node. - The `typing_ctx` is passed for consistency but not modified by this function. 
- """ - def empty_descr(_typing_ctx) do - false_id = BDD.false_node_id() - - %{ - atoms_bdd_id: false_id, - integers_bdd_id: false_id, - strings_bdd_id: false_id, - pairs_bdd_id: false_id, - records_bdd_id: false_id, - functions_bdd_id: false_id, - absent_marker_bdd_id: false_id - # Add other kinds as needed, e.g., for abstract types - } - end - - @doc """ - Returns a `Descr` map representing the universal type (Any). - All BDD IDs in this `Descr` point to the canonical `true` BDD node. - The `typing_ctx` is passed for consistency but not modified by this function. - """ - def any_descr(_typing_ctx) do - true_id = BDD.true_node_id() - - %{ - atoms_bdd_id: true_id, - integers_bdd_id: true_id, - strings_bdd_id: true_id, - pairs_bdd_id: true_id, - records_bdd_id: true_id, - functions_bdd_id: true_id, - # For 'Any', absence is typically not included unless explicitly modeled. - # If 'Any' should include the possibility of absence, this would be true_id. - # For now, let's assume 'Any' means any *value*, so absence is false. - # This can be refined based on the desired semantics of 'Any'. - # CDuce 'Any' does not include 'Absent'. - absent_marker_bdd_id: BDD.false_node_id() - } - end -end diff --git a/lib/tilly/type/ops.ex b/lib/tilly/type/ops.ex deleted file mode 100644 index c712e0b..0000000 --- a/lib/tilly/type/ops.ex +++ /dev/null @@ -1,305 +0,0 @@ -defmodule Tilly.Type.Ops do - @moduledoc """ - Implements set-theoretic operations on Type Descriptors (`Descr` maps) - and provides helper functions for constructing specific types. - Operations work with interned `Descr` IDs. - """ - - alias Tilly.BDD - alias Tilly.Type - alias Tilly.Type.Store - - # Defines the fields in a Descr map that hold BDD IDs. - # Order can be relevant if specific iteration order is ever needed, but for field-wise ops it's not. 
- defp descr_fields do - [ - :atoms_bdd_id, - :integers_bdd_id, - :strings_bdd_id, - :pairs_bdd_id, - :records_bdd_id, - :functions_bdd_id, - :absent_marker_bdd_id - ] - end - - # --- Core Set Operations --- - - @doc """ - Computes the union of two types represented by their `Descr` IDs. - Returns `{new_typing_ctx, result_descr_id}`. - """ - def union_types(typing_ctx, descr1_id, descr2_id) do - apply_type_op(typing_ctx, :union, descr1_id, descr2_id) - end - - @doc """ - Computes the intersection of two types represented by their `Descr` IDs. - Returns `{new_typing_ctx, result_descr_id}`. - """ - def intersection_types(typing_ctx, descr1_id, descr2_id) do - apply_type_op(typing_ctx, :intersection, descr1_id, descr2_id) - end - - @doc """ - Computes the negation of a type represented by its `Descr` ID. - Returns `{new_typing_ctx, result_descr_id}`. - """ - def negation_type(typing_ctx, descr_id) do - apply_type_op(typing_ctx, :negation, descr_id, nil) - end - - defp do_union_types(typing_ctx, descr1_id, descr2_id) do - descr1 = Store.get_descr_by_id(typing_ctx, descr1_id) - descr2 = Store.get_descr_by_id(typing_ctx, descr2_id) - - {final_ctx, result_fields_map} = - Enum.reduce(descr_fields(), {typing_ctx, %{}}, fn field, {current_ctx, acc_fields} -> - bdd1_id = Map.get(descr1, field) - bdd2_id = Map.get(descr2, field) - {new_ctx, result_bdd_id} = BDD.Ops.union_bdds(current_ctx, bdd1_id, bdd2_id) - {new_ctx, Map.put(acc_fields, field, result_bdd_id)} - end) - - Store.get_or_intern_descr(final_ctx, result_fields_map) - end - - defp do_intersection_types(typing_ctx, descr1_id, descr2_id) do - descr1 = Store.get_descr_by_id(typing_ctx, descr1_id) - descr2 = Store.get_descr_by_id(typing_ctx, descr2_id) - - {final_ctx, result_fields_map} = - Enum.reduce(descr_fields(), {typing_ctx, %{}}, fn field, {current_ctx, acc_fields} -> - bdd1_id = Map.get(descr1, field) - bdd2_id = Map.get(descr2, field) - {new_ctx, result_bdd_id} = BDD.Ops.intersection_bdds(current_ctx, bdd1_id, 
bdd2_id) - {new_ctx, Map.put(acc_fields, field, result_bdd_id)} - end) - - Store.get_or_intern_descr(final_ctx, result_fields_map) - end - - defp do_negation_type(typing_ctx, descr_id) do - descr = Store.get_descr_by_id(typing_ctx, descr_id) - - {final_ctx, result_fields_map} = - Enum.reduce(descr_fields(), {typing_ctx, %{}}, fn field, {current_ctx, acc_fields} -> - bdd_id = Map.get(descr, field) - - {ctx_after_neg, result_bdd_id} = - if field == :absent_marker_bdd_id do - {current_ctx, BDD.false_node_id()} - else - BDD.Ops.negation_bdd(current_ctx, bdd_id) - end - - {ctx_after_neg, Map.put(acc_fields, field, result_bdd_id)} - end) - - # Re-evaluate context threading if BDD ops significantly alter it beyond caching during reduce. - # The primary context update happens with Store.get_or_intern_descr. - # The reduce passes current_ctx, which accumulates cache updates from BDD ops. - Store.get_or_intern_descr(final_ctx, result_fields_map) - end - - # --- Caching Wrapper for Type Operations --- - defp apply_type_op(typing_ctx, op_key, descr1_id, descr2_id) do - cache_key = make_type_op_cache_key(op_key, descr1_id, descr2_id) - type_store = Map.get(typing_ctx, :type_store) - - case Map.get(type_store.ops_cache, cache_key) do - nil -> - # Not in cache, compute it - {new_typing_ctx, result_id} = - case op_key do - :union -> do_union_types(typing_ctx, descr1_id, descr2_id) - :intersection -> do_intersection_types(typing_ctx, descr1_id, descr2_id) - :negation -> do_negation_type(typing_ctx, descr1_id) # descr2_id is nil here - _ -> raise "Unsupported type op_key: #{op_key}" - end - - # Store in cache (important: use new_typing_ctx to get potentially updated type_store) - current_type_store_after_op = Map.get(new_typing_ctx, :type_store) - new_ops_cache = Map.put(current_type_store_after_op.ops_cache, cache_key, result_id) - final_type_store_with_cache = %{current_type_store_after_op | ops_cache: new_ops_cache} - # And put this updated type_store back into new_typing_ctx - 
final_typing_ctx_with_cache = Map.put(new_typing_ctx, :type_store, final_type_store_with_cache) - {final_typing_ctx_with_cache, result_id} - - cached_result_id -> - {typing_ctx, cached_result_id} - end - end - - defp make_type_op_cache_key(:negation, descr_id, nil), do: {:negation, descr_id} - defp make_type_op_cache_key(op_key, id1, id2) when op_key in [:union, :intersection] do - if id1 <= id2, do: {op_key, id1, id2}, else: {op_key, id2, id1} - end - defp make_type_op_cache_key(op_key, id1, id2), do: {op_key, id1, id2} - - - # --- Utility Functions --- - @doc """ - Checks if a type represented by its `Descr` ID is the empty type (Nothing). - Does not modify `typing_ctx`. - """ - def is_empty_type?(typing_ctx, descr_id) do - descr_map = Store.get_descr_by_id(typing_ctx, descr_id) - - Enum.all?(descr_fields(), fn field -> - bdd_id = Map.get(descr_map, field) - BDD.is_false_node?(typing_ctx, bdd_id) - end) - end - - # --- Construction Helper Functions --- - - @doc """ - Gets the `Descr` ID for the canonical 'Nothing' type. - """ - def get_type_nothing(typing_ctx) do - empty_descr_map = Type.empty_descr(typing_ctx) - Store.get_or_intern_descr(typing_ctx, empty_descr_map) - end - - @doc """ - Gets the `Descr` ID for the canonical 'Any' type. - """ - def get_type_any(typing_ctx) do - any_descr_map = Type.any_descr(typing_ctx) - Store.get_or_intern_descr(typing_ctx, any_descr_map) - end - - @doc """ - Creates a type `Descr` ID representing a single atom literal. - """ - def create_atom_literal_type(typing_ctx, atom_value) when is_atom(atom_value) do - false_id = BDD.false_node_id() - true_id = BDD.true_node_id() - - # Create a BDD for the single atom: Split(atom_value, True, False, False) - # The ops_module Tilly.BDD.AtomBoolOps is crucial here. 
- {ctx1, atom_bdd_id} = - BDD.Ops.split(typing_ctx, atom_value, true_id, false_id, false_id, Tilly.BDD.AtomBoolOps) - - descr_map = %{ - atoms_bdd_id: atom_bdd_id, - integers_bdd_id: false_id, - strings_bdd_id: false_id, - pairs_bdd_id: false_id, - records_bdd_id: false_id, - functions_bdd_id: false_id, - absent_marker_bdd_id: false_id - } - - Store.get_or_intern_descr(ctx1, descr_map) - end - - @doc """ - Creates a type `Descr` ID representing a single integer literal. - """ - def create_integer_literal_type(typing_ctx, integer_value) when is_integer(integer_value) do - false_id = BDD.false_node_id() - true_id = BDD.true_node_id() - - {ctx1, integer_bdd_id} = - BDD.Ops.split(typing_ctx, integer_value, true_id, false_id, false_id, Tilly.BDD.IntegerBoolOps) - - descr_map = %{ - atoms_bdd_id: false_id, - integers_bdd_id: integer_bdd_id, - strings_bdd_id: false_id, - pairs_bdd_id: false_id, - records_bdd_id: false_id, - functions_bdd_id: false_id, - absent_marker_bdd_id: false_id - } - Store.get_or_intern_descr(ctx1, descr_map) - end - - @doc """ - Creates a type `Descr` ID representing a single string literal. - """ - def create_string_literal_type(typing_ctx, string_value) when is_binary(string_value) do - false_id = BDD.false_node_id() - true_id = BDD.true_node_id() - - {ctx1, string_bdd_id} = - BDD.Ops.split(typing_ctx, string_value, true_id, false_id, false_id, Tilly.BDD.StringBoolOps) - - descr_map = %{ - atoms_bdd_id: false_id, - integers_bdd_id: false_id, - strings_bdd_id: string_bdd_id, - pairs_bdd_id: false_id, - records_bdd_id: false_id, - functions_bdd_id: false_id, - absent_marker_bdd_id: false_id - } - Store.get_or_intern_descr(ctx1, descr_map) - end - - @doc """ - Gets the `Descr` ID for the type representing all atoms. 
- """ - def get_primitive_type_any_atom(typing_ctx) do - false_id = BDD.false_node_id() - true_id = BDD.true_node_id() # This BDD must be interned with :atom_bool_ops if it's not universal - - # For a BDD representing "all atoms", its structure is simply True, - # but it must be associated with :atom_bool_ops. - # BDD.true_node_id() is universal. If we need a specific "true for atoms", - # we'd intern it: BDD.get_or_intern_node(ctx, Node.mk_true(), :atom_bool_ops) - # However, BDD.Ops functions fetch ops_module from operands. - # Universal true/false should work correctly. - - descr_map = %{ - atoms_bdd_id: true_id, - integers_bdd_id: false_id, - strings_bdd_id: false_id, - pairs_bdd_id: false_id, - records_bdd_id: false_id, - functions_bdd_id: false_id, - absent_marker_bdd_id: false_id - } - Store.get_or_intern_descr(typing_ctx, descr_map) - end - - @doc """ - Gets the `Descr` ID for the type representing all integers. - """ - def get_primitive_type_any_integer(typing_ctx) do - false_id = BDD.false_node_id() - true_id = BDD.true_node_id() - - descr_map = %{ - atoms_bdd_id: false_id, - integers_bdd_id: true_id, - strings_bdd_id: false_id, - pairs_bdd_id: false_id, - records_bdd_id: false_id, - functions_bdd_id: false_id, - absent_marker_bdd_id: false_id - } - Store.get_or_intern_descr(typing_ctx, descr_map) - end - - @doc """ - Gets the `Descr` ID for the type representing all strings. 
- """ - def get_primitive_type_any_string(typing_ctx) do - false_id = BDD.false_node_id() - true_id = BDD.true_node_id() - - descr_map = %{ - atoms_bdd_id: false_id, - integers_bdd_id: false_id, - strings_bdd_id: true_id, - pairs_bdd_id: false_id, - records_bdd_id: false_id, - functions_bdd_id: false_id, - absent_marker_bdd_id: false_id - } - Store.get_or_intern_descr(typing_ctx, descr_map) - end -end diff --git a/lib/tilly/type/store.ex b/lib/tilly/type/store.ex deleted file mode 100644 index db0c670..0000000 --- a/lib/tilly/type/store.ex +++ /dev/null @@ -1,79 +0,0 @@ -defmodule Tilly.Type.Store do - @moduledoc """ - Manages the interning (hash-consing) of Type Descriptor maps (`Descr` maps). - Ensures that for any unique `Descr` map, there is one canonical integer ID. - The type store is expected to be part of a `typing_ctx` map under the key `:type_store`. - """ - - @initial_next_descr_id 0 - - @doc """ - Initializes the type store within the typing context. - """ - def init_type_store(typing_ctx) when is_map(typing_ctx) do - type_store = %{ - descrs_by_structure: %{}, - structures_by_id: %{}, - next_descr_id: @initial_next_descr_id, - ops_cache: %{} # Cache for type operations {op_key, descr_id1, descr_id2} -> result_descr_id - } - - Map.put(typing_ctx, :type_store, type_store) - end - - @doc """ - Gets an existing Type Descriptor ID or interns a new one if it's not already in the store. - - All BDD IDs within the `descr_map` must already be canonical integer IDs. - - Returns a tuple `{new_typing_ctx, descr_id}`. - The `typing_ctx` is updated if a new `Descr` is interned. - """ - def get_or_intern_descr(typing_ctx, descr_map) do - type_store = Map.get(typing_ctx, :type_store) - - unless type_store do - raise ArgumentError, "Type store not initialized in typing_ctx. Call init_type_store first." - end - - # The descr_map itself is the key for interning. - # Assumes BDD IDs within descr_map are already canonical. 
- case Map.get(type_store.descrs_by_structure, descr_map) do - nil -> - # Descr not found, intern it - descr_id = type_store.next_descr_id - - new_descrs_by_structure = Map.put(type_store.descrs_by_structure, descr_map, descr_id) - new_structures_by_id = Map.put(type_store.structures_by_id, descr_id, descr_map) - new_next_descr_id = descr_id + 1 - - new_type_store = - %{ - type_store - | descrs_by_structure: new_descrs_by_structure, - structures_by_id: new_structures_by_id, - next_descr_id: new_next_descr_id - } - - new_typing_ctx = Map.put(typing_ctx, :type_store, new_type_store) - {new_typing_ctx, descr_id} - - existing_descr_id -> - # Descr found - {typing_ctx, existing_descr_id} - end - end - - @doc """ - Retrieves the `Descr` map from the type store given its ID. - Returns the `Descr` map or `nil` if not found. - """ - def get_descr_by_id(typing_ctx, descr_id) do - with %{type_store: %{structures_by_id: structures_by_id}} <- typing_ctx, - descr when not is_nil(descr) <- Map.get(structures_by_id, descr_id) do - descr - else - _ -> nil - end - end -end diff --git a/mix.exs b/mix.exs index 7d2a9a4..850d0c3 100644 --- a/mix.exs +++ b/mix.exs @@ -26,6 +26,7 @@ defmodule Til.MixProject do [ # {:dep_from_hexpm, "~> 0.3.0"}, # {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"} + {:ex_unit_summary, "~> 0.1.0", only: [:dev, :test]} ] end end diff --git a/mix.lock b/mix.lock new file mode 100644 index 0000000..6645093 --- /dev/null +++ b/mix.lock @@ -0,0 +1,3 @@ +%{ + "ex_unit_summary": {:hex, :ex_unit_summary, "0.1.0", "7b0352afc5e6a933c805df0a539b66b392ac12ba74d8b208db7d83f77cb57049", [:mix], [], "hexpm", "8c87d0deade3657102902251d2ec60b5b94560004ce0e2c2fa5b466232716bd6"}, +} diff --git a/new.exs b/new.exs index 7cc8823..9c370f6 100644 --- a/new.exs +++ b/new.exs @@ -1,4 +1,9 @@ +Mix.install([:ex_unit_summary]) +# Start ExUnitSummary application, with recommended config +ExUnitSummary.start(:normal, %{__struct__: ExUnitSummary.Config, 
filter_results: :failed, print_delay: 100}) +# Add ExUnitSummary.Formatter to list of ExUnit's formatters. +ExUnit.configure(formatters: [ExUnit.CLIFormatter, ExUnitSummary.Formatter]) Code.require_file("./debug.exs") defmodule Tdd.TypeSpec do @@ -2078,976 +2083,419 @@ end #### # xxx #### -# The following test runner files are unchanged, as they correctly validate the behavior -# of the public APIs. After the refactoring, they should all continue to pass, -# and the previously crashing test for recursive types should now pass as well. -# I am including them for a complete, runnable script. -# ... Test runner files ... -defmodule TddStoreTests do - def test(name, expected, result) do - if expected == result do - IO.puts("[PASS] #{name}") - else - IO.puts("[FAIL] #{name}") - IO.puts(" Expected: #{inspect(expected)}") - IO.puts(" Got: #{inspect(result)}") - Process.put(:test_failures, [name | Process.get(:test_failures, [])]) - end - end +ExUnit.start(autorun: false) - def run() do - IO.puts("\n--- Running Tdd.Store Tests ---") - Tdd.Store.init() - Process.put(:test_failures, []) +defmodule TddSystemTest do + use ExUnit.Case, async: false # Most tests mutate Tdd.Store, so they cannot run concurrently. 
- # --- Test Setup --- - Tdd.Store.init() - - # --- Test Cases --- - IO.puts("\n--- Section: Initialization and Terminals ---") - test("true_node_id returns 1", 1, Tdd.Store.true_node_id()) - test("false_node_id returns 0", 0, Tdd.Store.false_node_id()) - test("get_node for ID 1 returns true_terminal", {:ok, :true_terminal}, Tdd.Store.get_node(1)) - - test( - "get_node for ID 0 returns false_terminal", - {:ok, :false_terminal}, - Tdd.Store.get_node(0) - ) - - test( - "get_node for unknown ID returns not_found", - {:error, :not_found}, - Tdd.Store.get_node(99) - ) - - IO.puts("\n--- Section: Node Creation and Structural Sharing ---") - # Define some opaque variables - var_a = {:is_atom} - var_b = {:is_integer} - true_id = Tdd.Store.true_node_id() - false_id = Tdd.Store.false_node_id() - - # Create a new node. It should get ID 2. - id1 = Tdd.Store.find_or_create_node(var_a, true_id, false_id, false_id) - test("First created node gets ID 2", 2, id1) - - # Verify its content - test( - "get_node for ID 2 returns the correct tuple", - {:ok, {var_a, true_id, false_id, false_id}}, - Tdd.Store.get_node(id1) - ) - - # Create another, different node. It should get ID 3. - id2 = Tdd.Store.find_or_create_node(var_b, id1, false_id, false_id) - test("Second created node gets ID 3", 3, id2) - - # Attempt to create the first node again. - id1_again = Tdd.Store.find_or_create_node(var_a, true_id, false_id, false_id) - - test( - "Attempting to create an existing node returns the same ID (Structural Sharing)", - id1, - id1_again - ) - - # Check that next_id was not incremented by the shared call - id3 = Tdd.Store.find_or_create_node(var_b, true_id, false_id, false_id) - test("Next new node gets the correct ID (4)", 4, id3) - - IO.puts("\n--- Section: Basic Reduction Rule ---") - # Create a node where all children are the same. 
- id_redundant = Tdd.Store.find_or_create_node(var_a, id3, id3, id3) - - test( - "A node with identical children reduces to the child's ID", - id3, - id_redundant - ) - - IO.puts("\n--- Section: Caching ---") - cache_key = {:my_op, 1, 2} - test("Cache is initially empty for a key", :not_found, Tdd.Store.get_op_cache(cache_key)) - Tdd.Store.put_op_cache(cache_key, :my_result) - - test( - "Cache returns the stored value after put", - {:ok, :my_result}, - Tdd.Store.get_op_cache(cache_key) - ) - - Tdd.Store.put_op_cache(cache_key, :new_result) - test("Cache can be updated", {:ok, :new_result}, Tdd.Store.get_op_cache(cache_key)) - - # --- Final Report --- - failures = Process.get(:test_failures, []) - - if failures == [] do - IO.puts("\n✅ All Tdd.Store tests passed!") - else - IO.puts("\n❌ Found #{length(failures)} test failures.") - end - end -end - -defmodule TypeSpecTests do alias Tdd.TypeSpec - - # Simple test helper function - defp test(name, expected, tested) do - current_failures = Process.get(:test_failures, []) - result = TypeSpec.normalize(tested) - # Use a custom comparison to handle potentially unsorted lists in expected values - # The normalize function *should* sort, but this makes tests more robust. 
- is_equal = - case {expected, result} do - {{:union, list1}, {:union, list2}} -> Enum.sort(list1) == Enum.sort(list2) - {{:intersect, list1}, {:intersect, list2}} -> Enum.sort(list1) == Enum.sort(list2) - _ -> expected == result - end - - if is_equal do - IO.puts("[PASS] #{name}") - else - IO.puts("[FAIL] #{name}") - IO.puts(" tested: #{inspect(tested)}") - IO.puts(" Expected: #{inspect(expected)}") - IO.puts(" Got: #{inspect(result)}") - Process.put(:test_failures, [name | current_failures]) - end - end - - def run() do - IO.puts("\n--- Running Tdd.TypeSpec.normalize/1 Tests ---") - Tdd.Store.init() - Process.put(:test_failures, []) - - # --- Test Section: Base & Simple Types --- - IO.puts("\n--- Section: Base & Simple Types ---") - test("Normalizing :any is idempotent", :any, :any) - test("Normalizing :none is idempotent", :none, :none) - test("Normalizing :atom is idempotent", :atom, :atom) - test("Normalizing a literal is idempotent", {:literal, :foo}, {:literal, :foo}) - - # --- Test Section: Double Negation --- - IO.puts("\n--- Section: Double Negation ---") - test("¬(¬atom) simplifies to atom", :atom, {:negation, {:negation, :atom}}) - test("A single negation is preserved", {:negation, :integer}, {:negation, :integer}) - - test( - "¬(¬(¬atom)) simplifies to ¬atom", - {:negation, :atom}, - {:negation, {:negation, {:negation, :atom}}} - ) - - # --- Test Section: Union Normalization --- - IO.puts("\n--- Section: Union Normalization ---") - - test( - "Flattens nested unions", - {:union, [:atom, :integer, :list]}, - {:union, [:integer, {:union, [:list, :atom]}]} - ) - - test( - "Sorts members of a union", - {:union, [:atom, :integer, :list]}, - {:union, [:list, :integer, :atom]} - ) - - test( - "Removes duplicates in a union", - {:union, [:atom, :integer]}, - {:union, [:integer, :atom, :integer]} - ) - - test("Simplifies a union with :none (A | none -> A)", :atom, {:union, [:atom, :none]}) - test("Simplifies a union with :any (A | any -> any)", :any, {:union, 
[:atom, :any]}) - test("An empty union simplifies to :none", :none, {:union, []}) - test("A union containing only :none simplifies to :none", :none, {:union, [:none, :none]}) - test("A union of a single element simplifies to the element itself", :atom, {:union, [:atom]}) - - # --- Test Section: Intersection Normalization --- - IO.puts("\n--- Section: Intersection Normalization ---") - - test( - "Flattens nested intersections", - {:intersect, [:atom, :integer, :list]}, - {:intersect, [:integer, {:intersect, [:list, :atom]}]} - ) - - test( - "Sorts members of an intersection", - {:intersect, [:atom, :integer, :list]}, - {:intersect, [:list, :integer, :atom]} - ) - - test( - "Removes duplicates in an intersection", - {:intersect, [:atom, :integer]}, - {:intersect, [:integer, :atom, :integer]} - ) - - test( - "Simplifies an intersection with :any (A & any -> A)", - :atom, - {:intersect, [:atom, :any]} - ) - - test( - "Simplifies an intersection with :none (A & none -> none)", - :none, - {:intersect, [:atom, :none]} - ) - - test("An empty intersection simplifies to :any", :any, {:intersect, []}) - - test( - "An intersection of a single element simplifies to the element itself", - :atom, - {:intersect, [:atom]} - ) - - # --- Test Section: Recursive Normalization --- - IO.puts("\n--- Section: Recursive Normalization ---") - - test( - "Recursively normalizes elements in a tuple", - {:tuple, [:atom, {:union, [{:literal, :a}, {:literal, :b}]}]}, - {:tuple, [{:union, [:atom]}, {:union, [{:literal, :a}, {:literal, :b}]}]} - ) - - test( - "Recursively normalizes head and tail in a cons", - {:cons, :any, {:negation, :integer}}, - {:cons, {:union, [:atom, :any]}, {:negation, {:union, [:integer]}}} - ) - - test( - "Recursively normalizes element in list_of", - {:mu, :m_var0, {:union, [{:literal, []}, {:cons, :list, {:type_var, :m_var0}}]}}, - {:list_of, {:intersect, [:any, :list]}} - ) - - test( - "Recursively normalizes sub-spec in negation", - {:negation, {:union, [{:literal, 
:a}, {:literal, :b}]}}, - {:negation, {:union, [{:literal, :a}, {:literal, :b}]}} - ) - - # --- Test Section: Complex Nested Cases --- - IO.puts("\n--- Section: Complex Nested Cases ---") - - complex_spec = - {:union, - [ - :atom, - # simplifies to :integer - {:intersect, [:any, :integer, {:intersect, [:integer]}]}, - # simplifies to :list - {:union, [:none, :list]} - ]} - - test( - "Handles complex nested simplifications correctly", - {:union, [:atom, :integer, :list]}, - complex_spec - ) - - # --- Final Report --- - failures = Process.get(:test_failures, []) - - if failures == [] do - IO.puts("\n✅ All TypeSpec tests passed!") - else - IO.puts("\n❌ Found #{length(failures)} test failures:") - Enum.each(failures, &IO.puts(" - #{&1}")) - end - end -end - -defmodule TddVariableTests do + alias Tdd.Store alias Tdd.Variable - alias Tdd.TypeSpec - - # Simple test helper function - defp test(name, expected, result) do - current_failures = Process.get(:test_failures, []) - - if expected == result do - IO.puts("[PASS] #{name}") - else - IO.puts("[FAIL] #{name}") - IO.puts(" Expected: #{inspect(expected)}") - IO.puts(" Got: #{inspect(result)}") - Process.put(:test_failures, [name | current_failures]) - end - end - - def run() do - IO.puts("\n--- Running Tdd.Variable Tests ---") - Process.put(:test_failures, []) - # Setup for TDD IDs - Tdd.Store.init() - # Use a dummy context for these simple types - id_atom = Tdd.Compiler.spec_to_id(:atom) - id_integer = Tdd.Compiler.spec_to_id(:integer) - - # --- Test Section: Variable Structure --- - IO.puts("\n--- Section: Variable Structure ---") - test("v_is_atom returns correct tuple", {0, :is_atom, nil, nil}, Tdd.Variable.v_is_atom()) - test("v_atom_eq returns correct tuple", {1, :value, :foo, nil}, Tdd.Variable.v_atom_eq(:foo)) - test("v_int_lt returns correct tuple", {2, :alt, 10, nil}, Tdd.Variable.v_int_lt(10)) - - test( - "v_tuple_size_eq returns correct tuple", - {4, :a_size, 2, nil}, - Tdd.Variable.v_tuple_size_eq(2) - ) - - 
test( - "v_tuple_elem_pred nests a TDD ID correctly", - {4, :b_element, 0, id_integer}, - Tdd.Variable.v_tuple_elem_pred(0, id_integer) - ) - - test( - "v_list_is_empty returns correct tuple", - {5, :b_is_empty, nil, nil}, - Tdd.Variable.v_list_is_empty() - ) - - test( - "v_list_head_pred nests a TDD ID correctly", - {5, :c_head, id_atom, nil}, - Tdd.Variable.v_list_head_pred(id_atom) - ) - - # --- Test Section: Global Ordering --- - IO.puts("\n--- Section: Global Ordering (Based on Elixir Term Comparison) ---") - # Category 0 < Category 1 - test( - "Primary type var < Atom property var", - true, - Tdd.Variable.v_is_tuple() < Tdd.Variable.v_atom_eq(:anything) - ) - - # Within Category 2: alt < beq < cgt - test( - "Integer :lt var < Integer :eq var", - true, - Tdd.Variable.v_int_lt(10) < Tdd.Variable.v_int_eq(10) - ) - - test( - "Integer :eq var < Integer :gt var", - true, - Tdd.Variable.v_int_eq(10) < Tdd.Variable.v_int_gt(10) - ) - - # Within Category 2: comparison of value - test( - "Integer :eq(5) var < Integer :eq(10) var", - true, - Tdd.Variable.v_int_eq(5) < Tdd.Variable.v_int_eq(10) - ) - - # Within Category 4: comparison of index - test( - "Tuple elem(0) var < Tuple elem(1) var", - true, - Tdd.Variable.v_tuple_elem_pred(0, id_atom) < - Tdd.Variable.v_tuple_elem_pred(1, id_atom) - ) - - # Within Category 4, same index: comparison of nested ID - test( - "Tuple elem(0, id_atom) var vs Tuple elem(0, id_int) var", - id_atom < id_integer, - Tdd.Variable.v_tuple_elem_pred(0, id_atom) < - Tdd.Variable.v_tuple_elem_pred(0, id_integer) - ) - - test( - "List :b_is_empty var < List :c_head var", - true, - Tdd.Variable.v_list_is_empty() < Tdd.Variable.v_list_head_pred(id_atom) - ) - - test( - "List :c_head var < List :tail var", - true, - Tdd.Variable.v_list_head_pred(id_atom) < - Tdd.Variable.v_list_tail_pred(id_atom) - ) - - # --- Final Report --- - failures = Process.get(:test_failures, []) - - if failures == [] do - IO.puts("\n✅ All Tdd.Variable tests passed!") - 
else - IO.puts("\n❌ Found #{length(failures)} test failures.") - end - end -end - -defmodule ConsistencyEngineTests do + alias Tdd.Compiler alias Tdd.Consistency.Engine - alias Tdd.Variable + alias Tdd.Algo - defp test(name, expected, assumptions_map) do - result = Engine.check(assumptions_map) - # ... test reporting logic ... - is_ok = expected == result - status = if is_ok, do: "[PASS]", else: "[FAIL]" - IO.puts("#{status} #{name}") + # Helper to mimic the old test structure and provide better failure messages + # for spec comparisons. + defp assert_spec_normalized(expected, input_spec) do + result = TypeSpec.normalize(input_spec) + # The normalization process should produce a canonical, sorted form. + assert expected == result, """ + Input Spec: + #{inspect(input_spec, pretty: true)} - unless is_ok do - IO.puts(" Expected: #{inspect(expected)}, Got: #{inspect(result)}") - Process.put(:test_failures, [name | Process.get(:test_failures, [])]) + Expected Normalized: + #{inspect(expected, pretty: true)} + + Actual Normalized: + #{inspect(result, pretty: true)} + """ + end + + # Helper to check for equivalence by comparing TDD IDs. + defmacro assert_equivalent_specs(spec1, spec2) do + quote do + assert Compiler.spec_to_id(unquote(spec1)) == Compiler.spec_to_id(unquote(spec2)) + end + end + + # Helper to check for subtyping using the TDD compiler. + defmacro assert_subtype(spec1, spec2) do + quote do + assert Compiler.is_subtype(unquote(spec1), unquote(spec2)) end end - def run() do - IO.puts("\n--- Running Tdd.Consistency.Engine Tests ---") - Process.put(:test_failures, []) +defmacro refute_subtype(spec1, spec2) do + quote do + refute Compiler.is_subtype(unquote(spec1), unquote(spec2)) + end + end - # Setup TDD IDs + # Setup block that initializes the Tdd.Store before each test. + # This ensures that node IDs and caches are clean for every test case. 
+ setup do Tdd.Store.init() - id_atom = Tdd.Compiler.spec_to_id(:atom) + :ok + end - # --- Section: Basic & Implication Tests --- - IO.puts("\n--- Section: Basic & Implication Tests ---") - test("An empty assumption map is consistent", :consistent, %{}) - test("A single valid assumption is consistent", :consistent, %{Variable.v_is_atom() => true}) + # --- + # Tdd.Store Tests + # These tests validate the lowest-level state management of the TDD system. + # The Store is responsible for creating and storing the nodes of the decision diagram graph. + # --- + describe "Tdd.Store: Core state management for the TDD graph" do + @doc """ + Tests that the store initializes with the correct, reserved IDs for the + terminal nodes representing TRUE (:any) and FALSE (:none). + """ + test "initialization and terminals" do + assert Store.true_node_id() == 1 + assert Store.false_node_id() == 0 + assert Store.get_node(1) == {:ok, :true_terminal} + assert Store.get_node(0) == {:ok, :false_terminal} + assert Store.get_node(99) == {:error, :not_found} + end - test( - "An implied contradiction is caught by expander", - :contradiction, - %{Variable.v_atom_eq(:foo) => true, Variable.v_is_atom() => false} - ) + @doc """ + Tests the core functionality of creating nodes. It verifies that new nodes receive + incrementing IDs and that requesting an identical node reuses the existing one + (structural sharing), which is fundamental to the efficiency of TDDs. 
+ """ + test "node creation and structural sharing" do + var_a = {:is_atom} + var_b = {:is_integer} + true_id = Store.true_node_id() + false_id = Store.false_node_id() - test( - "Implication creates a consistent set", - :consistent, - # implies is_atom=true - %{Variable.v_atom_eq(:foo) => true} - ) + # First created node gets ID 2 (after 0 and 1 are taken by terminals) + id1 = Store.find_or_create_node(var_a, true_id, false_id, false_id) + assert id1 == 2 + assert Store.get_node(id1) == {:ok, {var_a, true_id, false_id, false_id}} - # --- Section: Primary Type Exclusivity --- - IO.puts("\n--- Section: Primary Type Exclusivity ---") + # Second, different node gets the next ID + id2 = Store.find_or_create_node(var_b, id1, false_id, false_id) + assert id2 == 3 - test( - "Two primary types cannot both be true", - :contradiction, - %{Variable.v_is_atom() => true, Variable.v_is_integer() => true} - ) + # Creating the first node again returns the same ID, not a new one + id1_again = Store.find_or_create_node(var_a, true_id, false_id, false_id) + assert id1_again == id1 - test( - "Two primary types implied to be true is a contradiction", - :contradiction, - %{Variable.v_atom_eq(:foo) => true, Variable.v_int_eq(5) => true} - ) + # Next new node gets the correct subsequent ID, proving no ID was wasted + id3 = Store.find_or_create_node(var_b, true_id, false_id, false_id) + assert id3 == 4 + end - test( - "One primary type true and another false is consistent", - :consistent, - %{Variable.v_is_atom() => true, Variable.v_is_integer() => false} - ) + @doc """ + Tests a key reduction rule: if a node's 'yes', 'no', and 'don't care' branches + all point to the same child node, the parent node is redundant and should be + replaced by the child node itself. 
+ """ + test "node reduction rule for identical children" do + var_a = {:is_atom} + id3 = 4 # from previous test logic + id_redundant = Store.find_or_create_node(var_a, id3, id3, id3) + assert id_redundant == id3 + end - # --- Section: Atom Consistency --- - IO.puts("\n--- Section: Atom Consistency ---") + @doc """ + Tests the memoization cache for operations like 'apply', 'negate', etc. + This ensures that repeated operations with the same inputs do not trigger + redundant computations. + """ + test "operation caching" do + cache_key = {:my_op, 1, 2} + assert Store.get_op_cache(cache_key) == :not_found - test( - "An atom cannot equal two different values", - :contradiction, - %{Variable.v_atom_eq(:foo) => true, Variable.v_atom_eq(:bar) => true} - ) + Store.put_op_cache(cache_key, :my_result) + assert Store.get_op_cache(cache_key) == {:ok, :my_result} - test( - "An atom can equal one value", - :consistent, - %{Variable.v_atom_eq(:foo) => true} - ) + Store.put_op_cache(cache_key, :new_result) + assert Store.get_op_cache(cache_key) == {:ok, :new_result} + end + end - # --- Section: List Flat Consistency --- - IO.puts("\n--- Section: List Flat Consistency ---") + # --- + # Tdd.TypeSpec.normalize/1 Tests + # These tests focus on ensuring the `normalize` function correctly transforms + # any TypeSpec into its canonical, simplified form. + # --- + describe "Tdd.TypeSpec.normalize/1: Base & Simple Types" do + @doc "Tests that normalizing already-simple specs doesn't change them (idempotency)." 
+ test "normalizing :any is idempotent" do + assert_spec_normalized(:any, :any) + end + test "normalizing :none is idempotent" do + assert_spec_normalized(:none, :none) + end + test "normalizing :atom is idempotent" do + assert_spec_normalized(:atom, :atom) + end + test "normalizing a literal is idempotent" do + assert_spec_normalized({:literal, :foo}, {:literal, :foo}) + end + end - test( - "A list cannot be empty and have a head property", - :contradiction, - %{ + describe "Tdd.TypeSpec.normalize/1: Double Negation" do + @doc "Tests the logical simplification that ¬(¬A) is equivalent to A." + test "¬(¬atom) simplifies to atom" do + assert_spec_normalized(:atom, {:negation, {:negation, :atom}}) + end + @doc "Tests that a single negation is preserved when it cannot be simplified further." + test "A single negation is preserved" do + assert_spec_normalized({:negation, :integer}, {:negation, :integer}) + end + @doc "Tests that an odd number of negations simplifies to a single negation." + test "¬(¬(¬atom)) simplifies to ¬atom" do + assert_spec_normalized({:negation, :atom}, {:negation, {:negation, {:negation, :atom}}}) + end + end + + describe "Tdd.TypeSpec.normalize/1: Union Normalization" do + @doc """ + Tests that unions are canonicalized by flattening nested unions, sorting the members, + and removing duplicates. e.g., `int | (list | atom | int)` becomes `(atom | int | list)`. + """ + test "flattens, sorts, and uniques members" do + input = {:union, [:integer, {:union, [:list, :atom, :integer]}]} + expected = {:union, [:atom, :integer, :list]} + assert_spec_normalized(expected, input) + end + @doc "Tests `A | none` simplifies to `A`, as `:none` is the identity for union." + test "simplifies a union with :none (A | none -> A)" do + assert_spec_normalized(:atom, {:union, [:atom, :none]}) + end + @doc "Tests `A | any` simplifies to `any`, as `:any` is the absorbing element for union." 
+ test "simplifies a union with :any (A | any -> any)" do + assert_spec_normalized(:any, {:union, [:atom, :any]}) + end + @doc "An empty set of types is logically equivalent to `:none`." + test "an empty union simplifies to :none" do + assert_spec_normalized(:none, {:union, []}) + end + @doc "A union containing just one type should simplify to that type itself." + test "a union of a single element simplifies to the element itself" do + assert_spec_normalized(:atom, {:union, [:atom]}) + end + end + + describe "Tdd.TypeSpec.normalize/1: Intersection Normalization" do + @doc "Tests that intersections are canonicalized like unions (flatten, sort, unique)." + test "flattens, sorts, and uniques members" do + input = {:intersect, [:integer, {:intersect, [:list, :atom, :integer]}]} + expected = {:intersect, [:atom, :integer, :list]} + assert_spec_normalized(expected, input) + end + @doc "Tests `A & any` simplifies to `A`, as `:any` is the identity for intersection." + test "simplifies an intersection with :any (A & any -> A)" do + assert_spec_normalized(:atom, {:intersect, [:atom, :any]}) + end + @doc "Tests `A & none` simplifies to `none`, as `:none` is the absorbing element." + test "simplifies an intersection with :none (A & none -> none)" do + assert_spec_normalized(:none, {:intersect, [:atom, :none]}) + end + @doc "An intersection of zero types is logically `any` (no constraints)." + test "an empty intersection simplifies to :any" do + assert_spec_normalized(:any, {:intersect, []}) + end + @doc "An intersection of one type simplifies to the type itself." + test "an intersection of a single element simplifies to the element itself" do + assert_spec_normalized(:atom, {:intersect, [:atom]}) + end + end + + describe "Tdd.TypeSpec.normalize/1: Subtype Reduction" do + @doc """ + Tests a key simplification: if a union contains a type and its own subtype, + the subtype is redundant and should be removed. E.g., `(1 | integer)` is just `integer`. 
+ Here, `:foo` and `:bar` are subtypes of `:atom`, so the union simplifies to `:atom`. + """ + test "(:foo | :bar | atom) simplifies to atom" do + input = {:union, [{:literal, :foo}, {:literal, :bar}, :atom]} + expected = :atom + assert_spec_normalized(expected, input) + end + end + + describe "Tdd.TypeSpec: Advanced Normalization (μ, Λ, Apply)" do + @doc """ + Tests alpha-conversion for recursive types. The bound variable name (`:X`) + should be renamed to a canonical name (`:m_var0`) to ensure structural equality + regardless of the name chosen by the user. + """ + test "basic alpha-conversion for μ-variable" do + input = {:mu, :X, {:type_var, :X}} + expected = {:mu, :m_var0, {:type_var, :m_var0}} + assert_spec_normalized(expected, input) + end + + @doc """ + Tests that the syntactic sugar `{:list_of, T}` is correctly desugared into + its underlying recursive definition: `μT.[] | cons(T, μT)`. + """ + test "list_of(integer) normalizes to a μ-expression with canonical var" do + input = {:list_of, :integer} + expected = {:mu, :m_var0, {:union, [{:literal, []}, {:cons, :integer, {:type_var, :m_var0}}]}} + assert_spec_normalized(expected, input) + end + + @doc """ + Tests beta-reduction (function application). Applying the identity function + `(ΛT.T)` to `integer` should result in `integer`. + """ + test "simple application: (ΛT.T) integer -> integer" do + input = {:type_apply, {:type_lambda, [:T], {:type_var, :T}}, [:integer]} + expected = :integer + assert_spec_normalized(expected, input) + end + + @doc """ + Tests a more complex beta-reduction. Applying a list constructor lambda + to `:atom` should produce the normalized form of `list_of(atom)`. + """ + test "application with structure: (ΛT. 
list_of(T)) atom -> list_of(atom) (normalized form)" do + input = {:type_apply, {:type_lambda, [:T], {:list_of, {:type_var, :T}}}, [:atom]} + expected = {:mu, :m_var0, {:union, [{:literal, []}, {:cons, :atom, {:type_var, :m_var0}}]}} + assert_spec_normalized(expected, input) + end + end + + # --- + # Tdd.Consistency.Engine Tests + # These tests validate the logic that detects contradictions in a set of predicate assumptions. + # --- + describe "Tdd.Consistency.Engine: Logic for detecting contradictions" do + # This setup is local to this describe block, which is fine. + setup do + Tdd.Store.init() + id_atom = Tdd.Compiler.spec_to_id(:atom) + %{id_atom: id_atom} + end + + @doc "An empty set of assumptions has no contradictions." + test "an empty assumption map is consistent" do + assert Engine.check(%{}) == :consistent + end + + @doc """ + Tests that the engine uses predicate traits to find implied contradictions. + `v_atom_eq(:foo)` implies `v_is_atom()` is true, which contradicts the explicit + assumption that `v_is_atom()` is false. + """ + test "an implied contradiction is caught by expander" do + assumptions = %{Variable.v_atom_eq(:foo) => true, Variable.v_is_atom() => false} + assert Engine.check(assumptions) == :contradiction + end + + @doc "A term cannot belong to two different primary types like :atom and :integer." + test "two primary types cannot both be true" do + assumptions = %{Variable.v_is_atom() => true, Variable.v_is_integer() => true} + assert Engine.check(assumptions) == :contradiction + end + + @doc "A list cannot be empty and simultaneously have properties on its head (which wouldn't exist)." 
+ test "a list cannot be empty and have a head property", %{id_atom: id_atom} do + assumptions = %{ Variable.v_list_is_empty() => true, Variable.v_list_head_pred(id_atom) => true } - ) + assert Engine.check(assumptions) == :contradiction + end - test( - "A non-empty list can have a head property", - :consistent, - %{ - Variable.v_list_is_empty() => false, - Variable.v_list_head_pred(id_atom) => true + @doc "Tests for logical contradictions in integer ranges." + test "int < 10 AND int > 20 is a contradiction" do + assumptions = %{ + Variable.v_int_lt(10) => true, + Variable.v_int_gt(20) => true } - ) + assert Engine.check(assumptions) == :contradiction + end + end + + # --- + # Compiler & Algo Integration Tests + # These tests ensure that the high-level public APIs (`is_subtype`, `spec_to_id`) + # work correctly by integrating the compiler and the graph algorithms. + # --- + describe "Tdd.Compiler and Tdd.Algo Integration: High-level API validation" do + @doc "Verifies semantic equivalence of types using TDD IDs. e.g., `atom & any` is the same type as `atom`." + test "basic equivalences" do + assert_equivalent_specs({:intersect, [:atom, :any]}, :atom) + assert_equivalent_specs({:union, [:atom, :none]}, :atom) + assert_equivalent_specs({:intersect, [:atom, :integer]}, :none) + end - test( - "A non-empty list is implied by head property", - :consistent, - # implies is_empty=false - %{Variable.v_list_head_pred(id_atom) => true} - ) + @doc "Tests the main `is_subtype` public API for simple, non-recursive types." + test "basic subtyping" do + assert_subtype({:literal, :foo}, :atom) + refute_subtype(:atom, {:literal, :foo}) + assert_subtype(:none, :atom) + assert_subtype(:atom, :any) + end - # --- Section: Integer Consistency --- - IO.puts("\n--- Section: Integer Consistency ---") - test("int == 5 is consistent", :consistent, %{Variable.v_int_eq(5) => true}) + @doc "Tests that impossible type intersections compile to the `:none` (FALSE) node." 
+ test "contradictions" do + assert Compiler.spec_to_id({:intersect, [:atom, :integer]}) == Store.false_node_id() + assert Compiler.spec_to_id({:intersect, [{:literal, :foo}, {:literal, :bar}]}) == Store.false_node_id() + end + end - test("int == 5 AND int == 10 is a contradiction", :contradiction, %{ - Variable.v_int_eq(5) => true, - Variable.v_int_eq(10) => true - }) + # --- + # Tdd.Compiler Advanced Feature Tests + # These tests target the most complex features: recursive and polymorphic types. + # --- + describe "Tdd.Compiler: Advanced Features (μ, Λ, Apply)" do + @doc """ + It checks for covariance in generic types: a list of integers is a subtype of a list of anything, + but the reverse is not true. This requires the system to correctly handle coinductive reasoning + on the recursive TDD nodes. + """ + test "the previously crashing recursive subtype test now passes" do + int_list = {:list_of, :integer} + any_list = {:list_of, :any} + assert_subtype(:integer, :any) + # The key test that was failing due to the bug + assert_subtype(int_list, any_list) + refute_subtype(any_list, int_list) - test("int < 10 AND int > 20 is a contradiction", :contradiction, %{ - Variable.v_int_lt(10) => true, - Variable.v_int_gt(20) => true - }) + # Also test instances against the recursive type + assert_subtype({:cons, {:literal, 1}, {:literal, []}}, int_list) + refute_subtype({:cons, {:literal, :a}, {:literal, []}}, int_list) + end - test("int > 5 AND int < 4 is a contradiction", :contradiction, %{ - Variable.v_int_gt(5) => true, - Variable.v_int_lt(4) => true - }) + @doc "Tests that manually-defined recursive types (like a binary tree) can be compiled and checked correctly." 
+ test "explicit μ-types" do + leaf_node = {:literal, :empty_tree} + tree_spec = + {:mu, :Tree, + {:union, + [ + leaf_node, + {:tuple, [:atom, {:type_var, :Tree}, {:type_var, :Tree}]} + ]}} + + # Test that it compiles to a valid TDD ID + assert is_integer(Compiler.spec_to_id(tree_spec)) - test("int > 5 AND int < 7 is consistent", :consistent, %{ - Variable.v_int_gt(5) => true, - Variable.v_int_lt(7) => true - }) + # Test that an instance of the tree is correctly identified as a subtype + simple_tree_instance = {:tuple, [{:literal, :a}, leaf_node, leaf_node]} + assert_subtype(simple_tree_instance, tree_spec) + end - # --- Final Report --- - failures = Process.get(:test_failures, []) + @doc """ + Tests that a polymorphic type created via lambda application is equivalent + to its manually specialized counterpart. e.g., `(List)(int)` should be the + same as `List`. + """ + test "polymorphism (Λ, Apply)" do + gen_list_lambda = {:type_lambda, [:Tparam], {:list_of, {:type_var, :Tparam}}} + list_of_int_from_apply = {:type_apply, gen_list_lambda, [:integer]} + int_list = {:list_of, :integer} - if failures == [] do - IO.puts("\n✅ All Consistency.Engine tests passed!") - else - IO.puts("\n❌ Found #{length(failures)} test failures.") + assert_equivalent_specs(list_of_int_from_apply, int_list) end end end -defmodule TddAlgoTests do - alias Tdd.Store - alias Tdd.Variable - alias Tdd.Algo - - defp test(name, expected, result) do - if expected == result do - IO.puts("[PASS] #{name}") - else - IO.puts("[FAIL] #{name}") - IO.puts(" Expected: #{inspect(expected)}") - IO.puts(" Got: #{inspect(result)}") - Process.put(:test_failures, [name | Process.get(:test_failures, [])]) - end - end - - def run() do - IO.puts("\n--- Running Tdd.Algo Tests ---") - Process.put(:test_failures, []) - - Store.init() - true_id = Store.true_node_id() - false_id = Store.false_node_id() - - t_atom = Store.find_or_create_node(Variable.v_is_atom(), true_id, false_id, false_id) - t_int = 
Store.find_or_create_node(Variable.v_is_integer(), true_id, false_id, false_id) - - foo_val_check = - Store.find_or_create_node(Variable.v_atom_eq(:foo), true_id, false_id, false_id) - t_foo = Store.find_or_create_node(Variable.v_is_atom(), foo_val_check, false_id, false_id) - bar_val_check = - Store.find_or_create_node(Variable.v_atom_eq(:bar), true_id, false_id, false_id) - t_bar = Store.find_or_create_node(Variable.v_is_atom(), bar_val_check, false_id, false_id) - - IO.puts("\n--- Section: Algo.negate ---") - test("negate(true) is false", false_id, Algo.negate(true_id)) - test("negate(false) is true", true_id, Algo.negate(false_id)) - test("negate(negate(t_atom)) is t_atom", t_atom, Algo.negate(Algo.negate(t_atom))) - - IO.puts("\n--- Section: Algo.apply (raw structural operations) ---") - - op_sum = fn - :true_terminal, _ -> :true_terminal - _, :true_terminal -> :true_terminal - t, :false_terminal -> t - :false_terminal, t -> t - end - - op_intersect = fn - :false_terminal, _ -> :false_terminal - _, :false_terminal -> :false_terminal - t, :true_terminal -> t - :true_terminal, t -> t - end - - sum_atom_int = Algo.apply(:sum, op_sum, t_atom, t_int) - is_atom_node = {Variable.v_is_atom(), true_id, t_int, t_int} - expected_sum_structure_id = - Store.find_or_create_node( - elem(is_atom_node, 0), - elem(is_atom_node, 1), - elem(is_atom_node, 2), - elem(is_atom_node, 3) - ) - test("Structure of 'atom | int' is correct", expected_sum_structure_id, sum_atom_int) - - intersect_foo_bar_raw = Algo.apply(:intersect, op_intersect, t_foo, t_bar) - test(":foo & :bar (raw) is not the false node", false, intersect_foo_bar_raw == false_id) - - IO.puts("\n--- Section: Algo.simplify (with Consistency.Engine) ---") - contradictory_assumptions = %{Variable.v_is_atom() => true, Variable.v_is_integer() => true} - simplified_under_contradiction = Algo.simplify(true_id, contradictory_assumptions) - test( - "Simplifying under contradictory assumptions (atom & int) results in false", - 
false_id, - simplified_under_contradiction - ) - - assumptions_with_foo = %{Variable.v_atom_eq(:foo) => true} - simplified_int_given_foo = Algo.simplify(t_int, assumptions_with_foo) - test( - "Simplifying 'integer' given 'value==:foo' results in false", - false_id, - simplified_int_given_foo - ) - - intersect_atom_int_raw = Algo.apply(:intersect, op_intersect, t_atom, t_int) - simplified_atom_int = Algo.simplify(intersect_atom_int_raw, %{}) - test("Simplifying 'atom & int' results in false", false_id, simplified_atom_int) - - simplified_sum_given_atom = Algo.simplify(sum_atom_int, %{Variable.v_is_atom() => true}) - test( - "Simplifying 'atom | int' given 'is_atom==true' results in true", - true_id, - simplified_sum_given_atom - ) - - simplified_sum_given_not_atom = Algo.simplify(sum_atom_int, %{Variable.v_is_atom() => false}) - test( - "Simplifying 'atom | int' given 'is_atom==false' results in 'integer'", - t_int, - simplified_sum_given_not_atom - ) - - failures = Process.get(:test_failures, []) - if failures == [], do: IO.puts("\n✅ All Tdd.Algo tests passed!"), - else: IO.puts("\n❌ Found #{length(failures)} test failures.") - end -end - -# NOTE: Tdd.TypeReconstructor and its tests are removed. 
- -defmodule CompilerAlgoTests do - alias Tdd.Compiler - alias Tdd.Store - - defp are_equivalent(spec1, spec2), - do: Compiler.spec_to_id(spec1) == Compiler.spec_to_id(spec2) - defp is_contradiction(spec), do: Compiler.spec_to_id(spec) == Store.false_node_id() - defp test_subtype(name, expected, s1, s2), do: test(name, expected, Compiler.is_subtype(s1, s2)) - defp test_equiv(name, expected, s1, s2), do: test(name, expected, are_equivalent(s1, s2)) - defp test_contradiction(name, expected \\ true), do: &test(name, expected, is_contradiction(&1)) - - defp test(name, exp, res) do - is_ok = exp == res - status = if is_ok, do: "[PASS]", else: "[FAIL]" - IO.puts("#{status} #{name}") - unless is_ok do - IO.puts(" Expected: #{inspect(exp)}, Got: #{inspect(res)}") - Process.put(:test_failures, [name | Process.get(:test_failures, [])]) - end - end - - def run() do - IO.puts("\n--- Running Compiler & Algo Integration Tests ---") - Tdd.Store.init() - Process.put(:test_failures, []) - # No top-level init, Compiler.spec_to_id does it. 
- - IO.puts("\n--- Section: Basic Equivalences ---") - test_equiv("atom & any == atom", true, {:intersect, [:atom, :any]}, :atom) - test_equiv("atom | none == atom", true, {:union, [:atom, :none]}, :atom) - test_equiv("atom & int == none", true, {:intersect, [:atom, :integer]}, :none) - - IO.puts("\n--- Section: Basic Subtyping ---") - test_subtype(":foo <: atom", true, {:literal, :foo}, :atom) - test_subtype("atom <: :foo", false, :atom, {:literal, :foo}) - test_subtype("none <: atom", true, :none, :atom) - test_subtype("atom <: any", true, :atom, :any) - - IO.puts("\n--- Section: Contradictions & Simplifications ---") - test_contradiction("atom & integer").({:intersect, [:atom, :integer]}) - test_contradiction(":foo & :bar").({:intersect, [{:literal, :foo}, {:literal, :bar}]}) - - IO.puts("\n--- Section: Subtype Reduction Logic ---") - test_equiv( - "(:foo | :bar | atom) simplifies to atom", - true, - {:union, [{:literal, :foo}, {:literal, :bar}, :atom]}, - :atom - ) - - failures = Process.get(:test_failures, []) - if failures == [], do: IO.puts("\n✅ All Compiler & Algo Integration tests passed!"), - else: IO.puts("\n❌ Found #{length(failures)} test failures.") - end -end - -defmodule TddCompilerRecursiveTests do - alias Tdd.Compiler - alias Tdd.Store - alias Tdd.TypeSpec - - def run() do - IO.puts("\n--- Running Tdd.Compiler Recursive Type Tests ---") - Tdd.Store.init() - Process.put(:test_failures, []) - # No top-level init - - IO.puts("\n--- Section: :cons ---") - test_subtype(":cons is a subtype of :list", true, {:cons, :atom, :list}, :list) - test_subtype( - "cons(integer, list) is a subtype of cons(any, any)", - true, - {:cons, :integer, :list}, - {:cons, :any, :any} - ) - - IO.puts("\n--- Section: :tuple ---") - test_subtype( - "{:tuple, [atom, int]} is a subtype of :tuple", - true, - {:tuple, [:atom, :integer]}, - :tuple - ) - spec_specific = {:tuple, [{:literal, :a}, {:literal, 1}]} - spec_general = {:tuple, [:atom, :integer]} - test_subtype("subtype 
check works element-wise", true, spec_specific, spec_general) - - IO.puts("\n--- Section: :list_of ---") - test_subtype("list_of(E) is a subtype of list", true, {:list_of, :integer}, :list) - test_subtype( - "empty list is a subtype of any list_of(E)", - true, - {:literal, []}, - {:list_of, :integer} - ) - test_subtype( - "list_of(subtype) is a subtype of list_of(supertype)", - true, - {:list_of, {:literal, 1}}, - {:list_of, :integer} - ) - test_subtype( - "list_of(supertype) is not a subtype of list_of(subtype)", - false, - {:list_of, :integer}, - {:list_of, {:literal, 1}} - ) - - IO.puts("\n--- Section: Equivalence ---") - e_spec = :integer - list_of_e = {:list_of, e_spec} - recursive_def = TypeSpec.normalize({:union, [{:literal, []}, {:cons, e_spec, list_of_e}]}) - test_equiv("list_of(E) == [] | cons(E, list_of(E))", true, list_of_e, recursive_def) - - failures = Process.get(:test_failures, []) - if failures == [], do: IO.puts("\n✅ All Tdd.Compiler Recursive Type tests passed!"), - else: IO.puts("\n❌ Found #{length(failures)} test failures.") - end - - defp test(name, expected, result) do - if expected == result, do: IO.puts("[PASS] #{name}"), - else: ( - IO.puts("[FAIL] #{name}") - IO.puts(" Expected: #{inspect(expected)}, Got: #{inspect(result)}") - Process.put(:test_failures, [name | Process.get(:test_failures, [])]) - ) - end - defp test_equiv(name, expected, spec1, spec2), - do: test(name, expected, Tdd.Compiler.spec_to_id(spec1) == Tdd.Compiler.spec_to_id(spec2)) - defp test_subtype(name, expected, spec1, spec2), - do: test(name, expected, Tdd.Compiler.is_subtype(spec1, spec2)) -end - -defmodule Tdd.TypeSpecAdvancedTests do - alias Tdd.TypeSpec - - defp test(name, expected_spec, input_spec) do - current_failures = Process.get(:test_failures, []) - normalized_result = TypeSpec.normalize(input_spec) - is_equal = expected_spec == normalized_result - if is_equal, do: IO.puts("[PASS] #{name}"), - else: ( - IO.puts("[FAIL] #{name}") - IO.puts(" Input: 
#{inspect(input_spec)}") - IO.puts(" Expected: #{inspect(expected_spec)}") - IO.puts(" Got: #{inspect(normalized_result)}") - Process.put(:test_failures, [name | current_failures]) - ) - end - - defp test_raise(name, expected_error_struct, expected_message_regex, fun) do - try do - fun.() - # If it gets here, the function did not raise - IO.puts("[FAIL] #{name}") - IO.puts(" Expected an error to be raised, but nothing was.") - Process.put(:test_failures, [name | Process.get(:test_failures, [])]) - rescue - e -> - if String.match?(e.message, expected_message_regex) do - IO.puts("[PASS] #{name}") - else - IO.puts("[FAIL] #{name}") - IO.puts(" Raised correct error type, but message did not match.") - IO.puts(" Expected message to match: #{inspect(expected_message_regex)}") - IO.puts(" Got message: #{e.message}") - Process.put(:test_failures, [name | Process.get(:test_failures, [])]) - end - catch - type, value -> - IO.puts("[FAIL] #{name}") - IO.puts(" Raised unexpected error type.") - IO.puts(" Expected: #{inspect(expected_error_struct)}") - IO.puts(" Got: #{type}:#{inspect(value)}") - Process.put(:test_failures, [name | Process.get(:test_failures, [])]) - end - end - - def run() do - IO.puts("\n--- Running Tdd.TypeSpec Advanced Normalization Tests ---") - Tdd.Store.init() - Process.put(:test_failures, []) - - IO.puts("\n--- Section: μ-type (Recursive Type) Normalization ---") - test( - "basic alpha-conversion for μ-variable", - {:mu, :m_var0, {:type_var, :m_var0}}, - {:mu, :X, {:type_var, :X}} - ) - test( - "list_of(integer) normalizes to a μ-expression with canonical var", - {:mu, :m_var0, {:union, [{:literal, []}, {:cons, :integer, {:type_var, :m_var0}}]}}, - {:list_of, :integer} - ) - - IO.puts("\n--- Section: Type Application Normalization (Beta-Reduction) ---") - test( - "simple application: (ΛT.T) integer -> integer", - :integer, - {:type_apply, {:type_lambda, [:T], {:type_var, :T}}, [:integer]} - ) - test( - "application with structure: (ΛT. 
list_of(T)) atom -> list_of(atom) (normalized form)", - {:mu, :m_var0, {:union, [{:literal, []}, {:cons, :atom, {:type_var, :m_var0}}]}}, - {:type_apply, {:type_lambda, [:T], {:list_of, {:type_var, :T}}}, [:atom]} - ) - - failures = Process.get(:test_failures, []) - if failures == [], do: IO.puts("\n✅ All Tdd.TypeSpec Advanced Normalization tests passed!"), - else: IO.puts("\n❌ Found #{length(failures)} test failures.") - end -end - -defmodule Tdd.CompilerAdvancedTests do - alias Tdd.Compiler - alias Tdd.Store - alias Tdd.TypeSpec - - defp test(name, expected, actual_fun_call_result) do - if expected == actual_fun_call_result, do: IO.puts("[PASS] #{name}"), - else: ( - IO.puts("[FAIL] #{name}") - IO.puts(" Expected: #{inspect(expected)}, Got: #{inspect(actual_fun_call_result)}") - Process.put(:test_failures, [name | Process.get(:test_failures, [])]) - ) - end - defp test_subtype(name, expected_bool, spec1, spec2), - do: test(name, expected_bool, Tdd.Compiler.is_subtype(spec1, spec2)) - defp test_equivalent_tdd(name, spec1, spec2), - do: test(name, Tdd.Compiler.spec_to_id(spec1), Tdd.Compiler.spec_to_id(spec2)) - defp test_raise_compile(name, err, msg, spec), - do: Tdd.TypeSpecAdvancedTests.test_raise(name, err, msg, fn -> - Tdd.Compiler.spec_to_id(spec) - end) - - def run() do - IO.puts("\n--- Running Tdd.Compiler Advanced Feature Tests (μ, Λ, Apply) ---") - Tdd.Store.init() - Process.put(:test_failures, []) - # No top-level init - - IO.puts("\n--- Section: Basic μ-type (list_of) ---") - int_list = {:list_of, :integer} - any_list = {:list_of, :any} - - # THIS IS THE TEST THAT WAS CRASHING - test_subtype("list_of(integer) <: list_of(any)", true, int_list, any_list) - # AND ITS COUNTERPART - test_subtype("list_of(any) <: list_of(integer)", false, any_list, int_list) - - test_subtype( - "cons(1, []) <: list_of(integer)", - true, - {:cons, {:literal, 1}, {:literal, []}}, - int_list - ) - - test_subtype( - "cons(:a, []) !<: list_of(integer)", - false, - {:cons, 
{:literal, :a}, {:literal, []}}, - int_list - ) - - IO.puts("\n--- Section: Explicit μ-types ---") - leaf_node = {:literal, :empty_tree} - tree_spec = - {:mu, :Tree, - {:union, - [ - leaf_node, - {:tuple, [:atom, {:type_var, :Tree}, {:type_var, :Tree}]} - ]}} - test("Explicit mu-type (atom tree) compiles", true, is_integer(Tdd.Compiler.spec_to_id(tree_spec))) - simple_tree_instance = {:tuple, [{:literal, :a}, leaf_node, leaf_node]} - test_subtype("Simple atom tree instance <: AtomTree", true, simple_tree_instance, tree_spec) - - IO.puts("\n--- Section: Polymorphism (Λ, Apply) ---") - gen_list_lambda = {:type_lambda, [:Tparam], {:list_of, {:type_var, :Tparam}}} - list_of_int_from_apply = {:type_apply, gen_list_lambda, [:integer]} - test_equivalent_tdd( - "(ΛT. list_of(T)) TDD == list_of(integer) TDD", - list_of_int_from_apply, - int_list - ) - - failures = Process.get(:test_failures, []) - if failures == [], do: IO.puts("\n✅ All Tdd.Compiler Advanced Feature tests passed!"), - else: IO.puts("\n❌ Found #{length(failures)} test failures.") - end -end - -Process.sleep(100) -TypeSpecTests.run() -TddStoreTests.run() -# The variable tests need a compiler and its dependencies -TddVariableTests.run() -TddAlgoTests.run() -ConsistencyEngineTests.run() -CompilerAlgoTests.run() -TddCompilerRecursiveTests.run() -Tdd.TypeSpecAdvancedTests.run() -Tdd.CompilerAdvancedTests.run() - +# Run all defined tests +ExUnit.run() diff --git a/test/support/test_helper.ex b/test/support/test_helper.ex index 2dc8918..d94e773 100644 --- a/test/support/test_helper.ex +++ b/test/support/test_helper.ex @@ -151,7 +151,8 @@ defmodule Til.TestHelpers do {original_key, deep_strip_id(original_value, nodes_map)} is_list(original_value) -> - {original_key, deep_strip_id(original_value, nodes_map)} # Handles lists of type defs + # Handles lists of type defs + {original_key, deep_strip_id(original_value, nodes_map)} true -> {original_key, original_value} @@ -165,7 +166,8 @@ defmodule Til.TestHelpers do # 
Recursively call on elements for lists of type definitions Enum.map(type_definition, &deep_strip_id(&1, nodes_map)) - true -> # Literals, atoms, numbers, nil, etc. (leaf nodes in the type structure) + # Literals, atoms, numbers, nil, etc. (leaf nodes in the type structure) + true -> type_definition end end diff --git a/test/test_helper.exs b/test/test_helper.exs index 869559e..9d7dd5e 100644 --- a/test/test_helper.exs +++ b/test/test_helper.exs @@ -1 +1,6 @@ ExUnit.start() + +ExUnitSummary.start(:normal, %ExUnitSummary.Config{filter_results: :failed, print_delay: 100}) + +# Add ExUnitSummary.Formatter to list of ExUnit's formatters. +ExUnit.configure(formatters: [ExUnit.CLIFormatter, ExUnitSummary.Formatter]) diff --git a/test/til_test.exs b/test/til_test.exs index 56d9a3d..3865245 100644 --- a/test/til_test.exs +++ b/test/til_test.exs @@ -1,8 +1,434 @@ -defmodule TilTest do - use ExUnit.Case - doctest Til +defmodule TddSystemTest do + # Most tests mutate Tdd.Store, so they cannot run concurrently. + use ExUnit.Case, async: false - test "greets the world" do - assert Til.hello() == :world + alias Tdd.TypeSpec + alias Tdd.Store + alias Tdd.Variable + alias Tdd.Compiler + alias Tdd.Consistency.Engine + alias Tdd.Algo + + # Helper to mimic the old test structure and provide better failure messages + # for spec comparisons. + defp assert_spec_normalized(expected, input_spec) do + result = TypeSpec.normalize(input_spec) + # The normalization process should produce a canonical, sorted form. + assert expected == result, """ + Input Spec: + #{inspect(input_spec, pretty: true)} + + Expected Normalized: + #{inspect(expected, pretty: true)} + + Actual Normalized: + #{inspect(result, pretty: true)} + """ + end + + # Helper to check for equivalence by comparing TDD IDs. 
+ defmacro assert_equivalent_specs(spec1, spec2) do + quote do + assert Compiler.spec_to_id(unquote(spec1)) == Compiler.spec_to_id(unquote(spec2)) + end + end + + # Helper to check for subtyping using the TDD compiler. + defmacro assert_subtype(spec1, spec2) do + quote do + assert Compiler.is_subtype(unquote(spec1), unquote(spec2)) + end + end + + defmacro refute_subtype(spec1, spec2) do + quote do + refute Compiler.is_subtype(unquote(spec1), unquote(spec2)) + end + end + + # Setup block that initializes the Tdd.Store before each test. + # This ensures that node IDs and caches are clean for every test case. + setup do + Tdd.Store.init() + :ok + end + + # --- + # Tdd.Store Tests + # These tests validate the lowest-level state management of the TDD system. + # The Store is responsible for creating and storing the nodes of the decision diagram graph. + # --- + describe "Tdd.Store: Core state management for the TDD graph" do + @doc """ + Tests that the store initializes with the correct, reserved IDs for the + terminal nodes representing TRUE (:any) and FALSE (:none). + """ + test "initialization and terminals" do + assert Store.true_node_id() == 1 + assert Store.false_node_id() == 0 + assert Store.get_node(1) == {:ok, :true_terminal} + assert Store.get_node(0) == {:ok, :false_terminal} + assert Store.get_node(99) == {:error, :not_found} + end + + @doc """ + Tests the core functionality of creating nodes. It verifies that new nodes receive + incrementing IDs and that requesting an identical node reuses the existing one + (structural sharing), which is fundamental to the efficiency of TDDs. 
+ """ + test "node creation and structural sharing" do + var_a = {:is_atom} + var_b = {:is_integer} + true_id = Store.true_node_id() + false_id = Store.false_node_id() + + # First created node gets ID 2 (after 0 and 1 are taken by terminals) + id1 = Store.find_or_create_node(var_a, true_id, false_id, false_id) + assert id1 == 2 + assert Store.get_node(id1) == {:ok, {var_a, true_id, false_id, false_id}} + + # Second, different node gets the next ID + id2 = Store.find_or_create_node(var_b, id1, false_id, false_id) + assert id2 == 3 + + # Creating the first node again returns the same ID, not a new one + id1_again = Store.find_or_create_node(var_a, true_id, false_id, false_id) + assert id1_again == id1 + + # Next new node gets the correct subsequent ID, proving no ID was wasted + id3 = Store.find_or_create_node(var_b, true_id, false_id, false_id) + assert id3 == 4 + end + + @doc """ + Tests a key reduction rule: if a node's 'yes', 'no', and 'don't care' branches + all point to the same child node, the parent node is redundant and should be + replaced by the child node itself. + """ + test "node reduction rule for identical children" do + var_a = {:is_atom} + # from previous test logic + id3 = 4 + id_redundant = Store.find_or_create_node(var_a, id3, id3, id3) + assert id_redundant == id3 + end + + @doc """ + Tests the memoization cache for operations like 'apply', 'negate', etc. + This ensures that repeated operations with the same inputs do not trigger + redundant computations. 
+ """ + test "operation caching" do + cache_key = {:my_op, 1, 2} + assert Store.get_op_cache(cache_key) == :not_found + + Store.put_op_cache(cache_key, :my_result) + assert Store.get_op_cache(cache_key) == {:ok, :my_result} + + Store.put_op_cache(cache_key, :new_result) + assert Store.get_op_cache(cache_key) == {:ok, :new_result} + end + end + + # --- + # Tdd.TypeSpec.normalize/1 Tests + # These tests focus on ensuring the `normalize` function correctly transforms + # any TypeSpec into its canonical, simplified form. + # --- + describe "Tdd.TypeSpec.normalize/1: Base & Simple Types" do + @doc "Tests that normalizing already-simple specs doesn't change them (idempotency)." + test "normalizing :any is idempotent" do + assert_spec_normalized(:any, :any) + end + + test "normalizing :none is idempotent" do + assert_spec_normalized(:none, :none) + end + + test "normalizing :atom is idempotent" do + assert_spec_normalized(:atom, :atom) + end + + test "normalizing a literal is idempotent" do + assert_spec_normalized({:literal, :foo}, {:literal, :foo}) + end + end + + describe "Tdd.TypeSpec.normalize/1: Double Negation" do + @doc "Tests the logical simplification that ¬(¬A) is equivalent to A." + test "¬(¬atom) simplifies to atom" do + assert_spec_normalized(:atom, {:negation, {:negation, :atom}}) + end + + @doc "Tests that a single negation is preserved when it cannot be simplified further." + test "A single negation is preserved" do + assert_spec_normalized({:negation, :integer}, {:negation, :integer}) + end + + @doc "Tests that an odd number of negations simplifies to a single negation." + test "¬(¬(¬atom)) simplifies to ¬atom" do + assert_spec_normalized({:negation, :atom}, {:negation, {:negation, {:negation, :atom}}}) + end + end + + describe "Tdd.TypeSpec.normalize/1: Union Normalization" do + @doc """ + Tests that unions are canonicalized by flattening nested unions, sorting the members, + and removing duplicates. 
e.g., `int | (list | atom | int)` becomes `(atom | int | list)`. + """ + test "flattens, sorts, and uniques members" do + input = {:union, [:integer, {:union, [:list, :atom, :integer]}]} + expected = {:union, [:atom, :integer, :list]} + assert_spec_normalized(expected, input) + end + + @doc "Tests `A | none` simplifies to `A`, as `:none` is the identity for union." + test "simplifies a union with :none (A | none -> A)" do + assert_spec_normalized(:atom, {:union, [:atom, :none]}) + end + + @doc "Tests `A | any` simplifies to `any`, as `:any` is the absorbing element for union." + test "simplifies a union with :any (A | any -> any)" do + assert_spec_normalized(:any, {:union, [:atom, :any]}) + end + + @doc "An empty set of types is logically equivalent to `:none`." + test "an empty union simplifies to :none" do + assert_spec_normalized(:none, {:union, []}) + end + + @doc "A union containing just one type should simplify to that type itself." + test "a union of a single element simplifies to the element itself" do + assert_spec_normalized(:atom, {:union, [:atom]}) + end + end + + describe "Tdd.TypeSpec.normalize/1: Intersection Normalization" do + @doc "Tests that intersections are canonicalized like unions (flatten, sort, unique)." + test "flattens, sorts, and uniques members" do + input = {:intersect, [:integer, {:intersect, [:list, :atom, :integer]}]} + expected = {:intersect, [:atom, :integer, :list]} + assert_spec_normalized(expected, input) + end + + @doc "Tests `A & any` simplifies to `A`, as `:any` is the identity for intersection." + test "simplifies an intersection with :any (A & any -> A)" do + assert_spec_normalized(:atom, {:intersect, [:atom, :any]}) + end + + @doc "Tests `A & none` simplifies to `none`, as `:none` is the absorbing element." + test "simplifies an intersection with :none (A & none -> none)" do + assert_spec_normalized(:none, {:intersect, [:atom, :none]}) + end + + @doc "An intersection of zero types is logically `any` (no constraints)." 
+ test "an empty intersection simplifies to :any" do + assert_spec_normalized(:any, {:intersect, []}) + end + + @doc "An intersection of one type simplifies to the type itself." + test "an intersection of a single element simplifies to the element itself" do + assert_spec_normalized(:atom, {:intersect, [:atom]}) + end + end + + describe "Tdd.TypeSpec.normalize/1: Subtype Reduction" do + @doc """ + Tests a key simplification: if a union contains a type and its own subtype, + the subtype is redundant and should be removed. E.g., `(1 | integer)` is just `integer`. + Here, `:foo` and `:bar` are subtypes of `:atom`, so the union simplifies to `:atom`. + """ + test "(:foo | :bar | atom) simplifies to atom" do + input = {:union, [{:literal, :foo}, {:literal, :bar}, :atom]} + expected = :atom + assert_spec_normalized(expected, input) + end + end + + describe "Tdd.TypeSpec: Advanced Normalization (μ, Λ, Apply)" do + @doc """ + Tests alpha-conversion for recursive types. The bound variable name (`:X`) + should be renamed to a canonical name (`:m_var0`) to ensure structural equality + regardless of the name chosen by the user. + """ + test "basic alpha-conversion for μ-variable" do + input = {:mu, :X, {:type_var, :X}} + expected = {:mu, :m_var0, {:type_var, :m_var0}} + assert_spec_normalized(expected, input) + end + + @doc """ + Tests that the syntactic sugar `{:list_of, T}` is correctly desugared into + its underlying recursive definition: `μT.[] | cons(T, μT)`. + """ + test "list_of(integer) normalizes to a μ-expression with canonical var" do + input = {:list_of, :integer} + + expected = + {:mu, :m_var0, {:union, [{:literal, []}, {:cons, :integer, {:type_var, :m_var0}}]}} + + assert_spec_normalized(expected, input) + end + + @doc """ + Tests beta-reduction (function application). Applying the identity function + `(ΛT.T)` to `integer` should result in `integer`. 
+ """ + test "simple application: (ΛT.T) integer -> integer" do + input = {:type_apply, {:type_lambda, [:T], {:type_var, :T}}, [:integer]} + expected = :integer + assert_spec_normalized(expected, input) + end + + @doc """ + Tests a more complex beta-reduction. Applying a list constructor lambda + to `:atom` should produce the normalized form of `list_of(atom)`. + """ + test "application with structure: (ΛT. list_of(T)) atom -> list_of(atom) (normalized form)" do + input = {:type_apply, {:type_lambda, [:T], {:list_of, {:type_var, :T}}}, [:atom]} + expected = {:mu, :m_var0, {:union, [{:literal, []}, {:cons, :atom, {:type_var, :m_var0}}]}} + assert_spec_normalized(expected, input) + end + end + + # --- + # Tdd.Consistency.Engine Tests + # These tests validate the logic that detects contradictions in a set of predicate assumptions. + # --- + describe "Tdd.Consistency.Engine: Logic for detecting contradictions" do + # This setup is local to this describe block, which is fine. + setup do + Tdd.Store.init() + id_atom = Tdd.Compiler.spec_to_id(:atom) + %{id_atom: id_atom} + end + + @doc "An empty set of assumptions has no contradictions." + test "an empty assumption map is consistent" do + assert Engine.check(%{}) == :consistent + end + + @doc """ + Tests that the engine uses predicate traits to find implied contradictions. + `v_atom_eq(:foo)` implies `v_is_atom()` is true, which contradicts the explicit + assumption that `v_is_atom()` is false. + """ + test "an implied contradiction is caught by expander" do + assumptions = %{Variable.v_atom_eq(:foo) => true, Variable.v_is_atom() => false} + assert Engine.check(assumptions) == :contradiction + end + + @doc "A term cannot belong to two different primary types like :atom and :integer." 
+ test "two primary types cannot both be true" do + assumptions = %{Variable.v_is_atom() => true, Variable.v_is_integer() => true} + assert Engine.check(assumptions) == :contradiction + end + + @doc "A list cannot be empty and simultaneously have properties on its head (which wouldn't exist)." + test "a list cannot be empty and have a head property", %{id_atom: id_atom} do + assumptions = %{ + Variable.v_list_is_empty() => true, + Variable.v_list_head_pred(id_atom) => true + } + + assert Engine.check(assumptions) == :contradiction + end + + @doc "Tests for logical contradictions in integer ranges." + test "int < 10 AND int > 20 is a contradiction" do + assumptions = %{ + Variable.v_int_lt(10) => true, + Variable.v_int_gt(20) => true + } + + assert Engine.check(assumptions) == :contradiction + end + end + + # --- + # Compiler & Algo Integration Tests + # These tests ensure that the high-level public APIs (`is_subtype`, `spec_to_id`) + # work correctly by integrating the compiler and the graph algorithms. + # --- + describe "Tdd.Compiler and Tdd.Algo Integration: High-level API validation" do + @doc "Verifies semantic equivalence of types using TDD IDs. e.g., `atom & any` is the same type as `atom`." + test "basic equivalences" do + assert_equivalent_specs({:intersect, [:atom, :any]}, :atom) + assert_equivalent_specs({:union, [:atom, :none]}, :atom) + assert_equivalent_specs({:intersect, [:atom, :integer]}, :none) + end + + @doc "Tests the main `is_subtype` public API for simple, non-recursive types." + test "basic subtyping" do + assert_subtype({:literal, :foo}, :atom) + refute_subtype(:atom, {:literal, :foo}) + assert_subtype(:none, :atom) + assert_subtype(:atom, :any) + end + + @doc "Tests that impossible type intersections compile to the `:none` (FALSE) node." 
+ test "contradictions" do + assert Compiler.spec_to_id({:intersect, [:atom, :integer]}) == Store.false_node_id() + + assert Compiler.spec_to_id({:intersect, [{:literal, :foo}, {:literal, :bar}]}) == + Store.false_node_id() + end + end + + # --- + # Tdd.Compiler Advanced Feature Tests + # These tests target the most complex features: recursive and polymorphic types. + # --- + describe "Tdd.Compiler: Advanced Features (μ, Λ, Apply)" do + @doc """ + It checks for covariance in generic types: a list of integers is a subtype of a list of anything, + but the reverse is not true. This requires the system to correctly handle coinductive reasoning + on the recursive TDD nodes. + """ + test "the previously crashing recursive subtype test now passes" do + int_list = {:list_of, :integer} + any_list = {:list_of, :any} + assert_subtype(:integer, :any) + # The key test that was failing due to the bug + assert_subtype(int_list, any_list) + refute_subtype(any_list, int_list) + + # Also test instances against the recursive type + assert_subtype({:cons, {:literal, 1}, {:literal, []}}, int_list) + refute_subtype({:cons, {:literal, :a}, {:literal, []}}, int_list) + end + + @doc "Tests that manually-defined recursive types (like a binary tree) can be compiled and checked correctly." + test "explicit μ-types" do + leaf_node = {:literal, :empty_tree} + + tree_spec = + {:mu, :Tree, + {:union, + [ + leaf_node, + {:tuple, [:atom, {:type_var, :Tree}, {:type_var, :Tree}]} + ]}} + + # Test that it compiles to a valid TDD ID + assert is_integer(Compiler.spec_to_id(tree_spec)) + + # Test that an instance of the tree is correctly identified as a subtype + simple_tree_instance = {:tuple, [{:literal, :a}, leaf_node, leaf_node]} + assert_subtype(simple_tree_instance, tree_spec) + end + + @doc """ + Tests that a polymorphic type created via lambda application is equivalent + to its manually specialized counterpart. e.g., `(List)(int)` should be the + same as `List`. 
+ """ + test "polymorphism (Λ, Apply)" do + gen_list_lambda = {:type_lambda, [:Tparam], {:list_of, {:type_var, :Tparam}}} + list_of_int_from_apply = {:type_apply, gen_list_lambda, [:integer]} + int_list = {:list_of, :integer} + + assert_equivalent_specs(list_of_int_from_apply, int_list) + end end end diff --git a/test/tilly/bdd/atom_bool_ops_test.exs b/test/tilly/bdd/atom_bool_ops_test.exs index d41bdc0..46e63eb 100644 --- a/test/tilly/bdd/atom_bool_ops_test.exs +++ b/test/tilly/bdd/atom_bool_ops_test.exs @@ -1,77 +1,77 @@ -defmodule Tilly.BDD.AtomBoolOpsTest do - use ExUnit.Case, async: true - - alias Tilly.BDD.AtomBoolOps - - describe "compare_elements/2" do - test "correctly compares atoms" do - assert AtomBoolOps.compare_elements(:apple, :banana) == :lt - assert AtomBoolOps.compare_elements(:banana, :apple) == :gt - assert AtomBoolOps.compare_elements(:cherry, :cherry) == :eq - end - end - - describe "equal_element?/2" do - test "correctly checks atom equality" do - assert AtomBoolOps.equal_element?(:apple, :apple) == true - assert AtomBoolOps.equal_element?(:apple, :banana) == false - end - end - - describe "hash_element/1" do - test "hashes atoms consistently" do - assert is_integer(AtomBoolOps.hash_element(:foo)) - assert AtomBoolOps.hash_element(:foo) == AtomBoolOps.hash_element(:foo) - assert AtomBoolOps.hash_element(:foo) != AtomBoolOps.hash_element(:bar) - end - end - - describe "leaf operations" do - test "empty_leaf/0 returns false" do - assert AtomBoolOps.empty_leaf() == false - end - - test "any_leaf/0 returns true" do - assert AtomBoolOps.any_leaf() == true - end - - test "is_empty_leaf?/1" do - assert AtomBoolOps.is_empty_leaf?(false) == true - assert AtomBoolOps.is_empty_leaf?(true) == false - end - - test "union_leaves/3" do - assert AtomBoolOps.union_leaves(%{}, false, false) == false - assert AtomBoolOps.union_leaves(%{}, true, false) == true - assert AtomBoolOps.union_leaves(%{}, false, true) == true - assert AtomBoolOps.union_leaves(%{}, 
true, true) == true - end - - test "intersection_leaves/3" do - assert AtomBoolOps.intersection_leaves(%{}, false, false) == false - assert AtomBoolOps.intersection_leaves(%{}, true, false) == false - assert AtomBoolOps.intersection_leaves(%{}, false, true) == false - assert AtomBoolOps.intersection_leaves(%{}, true, true) == true - end - - test "negation_leaf/2" do - assert AtomBoolOps.negation_leaf(%{}, false) == true - assert AtomBoolOps.negation_leaf(%{}, true) == false - end - end - - describe "test_leaf_value/1" do - test "returns :empty for false" do - assert AtomBoolOps.test_leaf_value(false) == :empty - end - - test "returns :full for true" do - assert AtomBoolOps.test_leaf_value(true) == :full - end - - # Conceptual test if atoms had other leaf values - # test "returns :other for other values" do - # assert AtomBoolOps.test_leaf_value(:some_other_leaf_marker) == :other - # end - end -end +# defmodule Tilly.BDD.AtomBoolOpsTest do +# use ExUnit.Case, async: true +# +# alias Tilly.BDD.AtomBoolOps +# +# describe "compare_elements/2" do +# test "correctly compares atoms" do +# assert AtomBoolOps.compare_elements(:apple, :banana) == :lt +# assert AtomBoolOps.compare_elements(:banana, :apple) == :gt +# assert AtomBoolOps.compare_elements(:cherry, :cherry) == :eq +# end +# end +# +# describe "equal_element?/2" do +# test "correctly checks atom equality" do +# assert AtomBoolOps.equal_element?(:apple, :apple) == true +# assert AtomBoolOps.equal_element?(:apple, :banana) == false +# end +# end +# +# describe "hash_element/1" do +# test "hashes atoms consistently" do +# assert is_integer(AtomBoolOps.hash_element(:foo)) +# assert AtomBoolOps.hash_element(:foo) == AtomBoolOps.hash_element(:foo) +# assert AtomBoolOps.hash_element(:foo) != AtomBoolOps.hash_element(:bar) +# end +# end +# +# describe "leaf operations" do +# test "empty_leaf/0 returns false" do +# assert AtomBoolOps.empty_leaf() == false +# end +# +# test "any_leaf/0 returns true" do +# assert 
AtomBoolOps.any_leaf() == true +# end +# +# test "is_empty_leaf?/1" do +# assert AtomBoolOps.is_empty_leaf?(false) == true +# assert AtomBoolOps.is_empty_leaf?(true) == false +# end +# +# test "union_leaves/3" do +# assert AtomBoolOps.union_leaves(%{}, false, false) == false +# assert AtomBoolOps.union_leaves(%{}, true, false) == true +# assert AtomBoolOps.union_leaves(%{}, false, true) == true +# assert AtomBoolOps.union_leaves(%{}, true, true) == true +# end +# +# test "intersection_leaves/3" do +# assert AtomBoolOps.intersection_leaves(%{}, false, false) == false +# assert AtomBoolOps.intersection_leaves(%{}, true, false) == false +# assert AtomBoolOps.intersection_leaves(%{}, false, true) == false +# assert AtomBoolOps.intersection_leaves(%{}, true, true) == true +# end +# +# test "negation_leaf/2" do +# assert AtomBoolOps.negation_leaf(%{}, false) == true +# assert AtomBoolOps.negation_leaf(%{}, true) == false +# end +# end +# +# describe "test_leaf_value/1" do +# test "returns :empty for false" do +# assert AtomBoolOps.test_leaf_value(false) == :empty +# end +# +# test "returns :full for true" do +# assert AtomBoolOps.test_leaf_value(true) == :full +# end +# +# # Conceptual test if atoms had other leaf values +# # test "returns :other for other values" do +# # assert AtomBoolOps.test_leaf_value(:some_other_leaf_marker) == :other +# # end +# end +# end diff --git a/test/tilly/bdd/integer_bool_ops_test.exs b/test/tilly/bdd/integer_bool_ops_test.exs index 2e18ef2..480a428 100644 --- a/test/tilly/bdd/integer_bool_ops_test.exs +++ b/test/tilly/bdd/integer_bool_ops_test.exs @@ -1,67 +1,67 @@ -defmodule Tilly.BDD.IntegerBoolOpsTest do - use ExUnit.Case, async: true - - alias Tilly.BDD.IntegerBoolOps - - describe "compare_elements/2" do - test "correctly compares integers" do - assert IntegerBoolOps.compare_elements(1, 2) == :lt - assert IntegerBoolOps.compare_elements(2, 1) == :gt - assert IntegerBoolOps.compare_elements(1, 1) == :eq - end - end - - describe 
"equal_element?/2" do - test "correctly checks equality of integers" do - assert IntegerBoolOps.equal_element?(1, 1) == true - assert IntegerBoolOps.equal_element?(1, 2) == false - end - end - - describe "hash_element/1" do - test "returns the integer itself as hash" do - assert IntegerBoolOps.hash_element(123) == 123 - assert IntegerBoolOps.hash_element(-5) == -5 - end - end - - describe "leaf operations" do - test "empty_leaf/0 returns false" do - assert IntegerBoolOps.empty_leaf() == false - end - - test "any_leaf/0 returns true" do - assert IntegerBoolOps.any_leaf() == true - end - - test "is_empty_leaf?/1" do - assert IntegerBoolOps.is_empty_leaf?(false) == true - assert IntegerBoolOps.is_empty_leaf?(true) == false - end - end - - describe "union_leaves/3" do - test "computes boolean OR" do - assert IntegerBoolOps.union_leaves(%{}, true, true) == true - assert IntegerBoolOps.union_leaves(%{}, true, false) == true - assert IntegerBoolOps.union_leaves(%{}, false, true) == true - assert IntegerBoolOps.union_leaves(%{}, false, false) == false - end - end - - describe "intersection_leaves/3" do - test "computes boolean AND" do - assert IntegerBoolOps.intersection_leaves(%{}, true, true) == true - assert IntegerBoolOps.intersection_leaves(%{}, true, false) == false - assert IntegerBoolOps.intersection_leaves(%{}, false, true) == false - assert IntegerBoolOps.intersection_leaves(%{}, false, false) == false - end - end - - describe "negation_leaf/2" do - test "computes boolean NOT" do - assert IntegerBoolOps.negation_leaf(%{}, true) == false - assert IntegerBoolOps.negation_leaf(%{}, false) == true - end - end -end +# defmodule Tilly.BDD.IntegerBoolOpsTest do +# use ExUnit.Case, async: true +# +# alias Tilly.BDD.IntegerBoolOps +# +# describe "compare_elements/2" do +# test "correctly compares integers" do +# assert IntegerBoolOps.compare_elements(1, 2) == :lt +# assert IntegerBoolOps.compare_elements(2, 1) == :gt +# assert IntegerBoolOps.compare_elements(1, 1) == :eq 
+# end +# end +# +# describe "equal_element?/2" do +# test "correctly checks equality of integers" do +# assert IntegerBoolOps.equal_element?(1, 1) == true +# assert IntegerBoolOps.equal_element?(1, 2) == false +# end +# end +# +# describe "hash_element/1" do +# test "returns the integer itself as hash" do +# assert IntegerBoolOps.hash_element(123) == 123 +# assert IntegerBoolOps.hash_element(-5) == -5 +# end +# end +# +# describe "leaf operations" do +# test "empty_leaf/0 returns false" do +# assert IntegerBoolOps.empty_leaf() == false +# end +# +# test "any_leaf/0 returns true" do +# assert IntegerBoolOps.any_leaf() == true +# end +# +# test "is_empty_leaf?/1" do +# assert IntegerBoolOps.is_empty_leaf?(false) == true +# assert IntegerBoolOps.is_empty_leaf?(true) == false +# end +# end +# +# describe "union_leaves/3" do +# test "computes boolean OR" do +# assert IntegerBoolOps.union_leaves(%{}, true, true) == true +# assert IntegerBoolOps.union_leaves(%{}, true, false) == true +# assert IntegerBoolOps.union_leaves(%{}, false, true) == true +# assert IntegerBoolOps.union_leaves(%{}, false, false) == false +# end +# end +# +# describe "intersection_leaves/3" do +# test "computes boolean AND" do +# assert IntegerBoolOps.intersection_leaves(%{}, true, true) == true +# assert IntegerBoolOps.intersection_leaves(%{}, true, false) == false +# assert IntegerBoolOps.intersection_leaves(%{}, false, true) == false +# assert IntegerBoolOps.intersection_leaves(%{}, false, false) == false +# end +# end +# +# describe "negation_leaf/2" do +# test "computes boolean NOT" do +# assert IntegerBoolOps.negation_leaf(%{}, true) == false +# assert IntegerBoolOps.negation_leaf(%{}, false) == true +# end +# end +# end diff --git a/test/tilly/bdd/node_test.exs b/test/tilly/bdd/node_test.exs index 22143c4..84f9773 100644 --- a/test/tilly/bdd/node_test.exs +++ b/test/tilly/bdd/node_test.exs @@ -1,123 +1,123 @@ -defmodule Tilly.BDD.NodeTest do - use ExUnit.Case, async: true - - alias 
Tilly.BDD.Node - - describe "Smart Constructors" do - test "mk_true/0 returns true" do - assert Node.mk_true() == true - end - - test "mk_false/0 returns false" do - assert Node.mk_false() == false - end - - test "mk_leaf/1 creates a leaf node" do - assert Node.mk_leaf(:some_value) == {:leaf, :some_value} - assert Node.mk_leaf(123) == {:leaf, 123} - end - - test "mk_split/4 creates a split node" do - assert Node.mk_split(:el, :p_id, :i_id, :n_id) == {:split, :el, :p_id, :i_id, :n_id} - end - end - - describe "Predicates" do - setup do - %{ - true_node: Node.mk_true(), - false_node: Node.mk_false(), - leaf_node: Node.mk_leaf("data"), - split_node: Node.mk_split(1, 2, 3, 4) - } - end - - test "is_true?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do - assert Node.is_true?(t) == true - assert Node.is_true?(f) == false - assert Node.is_true?(l) == false - assert Node.is_true?(s) == false - end - - test "is_false?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do - assert Node.is_false?(f) == true - assert Node.is_false?(t) == false - assert Node.is_false?(l) == false - assert Node.is_false?(s) == false - end - - test "is_leaf?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do - assert Node.is_leaf?(l) == true - assert Node.is_leaf?(t) == false - assert Node.is_leaf?(f) == false - assert Node.is_leaf?(s) == false - end - - test "is_split?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do - assert Node.is_split?(s) == true - assert Node.is_split?(t) == false - assert Node.is_split?(f) == false - assert Node.is_split?(l) == false - end - end - - describe "Accessors" do - setup do - %{ - leaf_node: Node.mk_leaf("leaf_data"), - split_node: Node.mk_split(:elem_id, :pos_child, :ign_child, :neg_child) - } - end - - test "value/1 for leaf node", %{leaf_node: l} do - assert Node.value(l) == "leaf_data" - end - - test "value/1 raises for non-leaf node" do - assert_raise ArgumentError, ~r/Not a leaf node/, fn -> 
Node.value(Node.mk_true()) end - - assert_raise ArgumentError, ~r/Not a leaf node/, fn -> - Node.value(Node.mk_split(1, 2, 3, 4)) - end - end - - test "element/1 for split node", %{split_node: s} do - assert Node.element(s) == :elem_id - end - - test "element/1 raises for non-split node" do - assert_raise ArgumentError, ~r/Not a split node/, fn -> Node.element(Node.mk_true()) end - assert_raise ArgumentError, ~r/Not a split node/, fn -> Node.element(Node.mk_leaf(1)) end - end - - test "positive_child/1 for split node", %{split_node: s} do - assert Node.positive_child(s) == :pos_child - end - - test "positive_child/1 raises for non-split node" do - assert_raise ArgumentError, ~r/Not a split node/, fn -> - Node.positive_child(Node.mk_leaf(1)) - end - end - - test "ignore_child/1 for split node", %{split_node: s} do - assert Node.ignore_child(s) == :ign_child - end - - test "ignore_child/1 raises for non-split node" do - assert_raise ArgumentError, ~r/Not a split node/, fn -> - Node.ignore_child(Node.mk_leaf(1)) - end - end - - test "negative_child/1 for split node", %{split_node: s} do - assert Node.negative_child(s) == :neg_child - end - - test "negative_child/1 raises for non-split node" do - assert_raise ArgumentError, ~r/Not a split node/, fn -> - Node.negative_child(Node.mk_leaf(1)) - end - end - end -end +# defmodule Tilly.BDD.NodeTest do +# use ExUnit.Case, async: true +# +# alias Tilly.BDD.Node +# +# describe "Smart Constructors" do +# test "mk_true/0 returns true" do +# assert Node.mk_true() == true +# end +# +# test "mk_false/0 returns false" do +# assert Node.mk_false() == false +# end +# +# test "mk_leaf/1 creates a leaf node" do +# assert Node.mk_leaf(:some_value) == {:leaf, :some_value} +# assert Node.mk_leaf(123) == {:leaf, 123} +# end +# +# test "mk_split/4 creates a split node" do +# assert Node.mk_split(:el, :p_id, :i_id, :n_id) == {:split, :el, :p_id, :i_id, :n_id} +# end +# end +# +# describe "Predicates" do +# setup do +# %{ +# true_node: 
Node.mk_true(), +# false_node: Node.mk_false(), +# leaf_node: Node.mk_leaf("data"), +# split_node: Node.mk_split(1, 2, 3, 4) +# } +# end +# +# test "is_true?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do +# assert Node.is_true?(t) == true +# assert Node.is_true?(f) == false +# assert Node.is_true?(l) == false +# assert Node.is_true?(s) == false +# end +# +# test "is_false?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do +# assert Node.is_false?(f) == true +# assert Node.is_false?(t) == false +# assert Node.is_false?(l) == false +# assert Node.is_false?(s) == false +# end +# +# test "is_leaf?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do +# assert Node.is_leaf?(l) == true +# assert Node.is_leaf?(t) == false +# assert Node.is_leaf?(f) == false +# assert Node.is_leaf?(s) == false +# end +# +# test "is_split?/1", %{true_node: t, false_node: f, leaf_node: l, split_node: s} do +# assert Node.is_split?(s) == true +# assert Node.is_split?(t) == false +# assert Node.is_split?(f) == false +# assert Node.is_split?(l) == false +# end +# end +# +# describe "Accessors" do +# setup do +# %{ +# leaf_node: Node.mk_leaf("leaf_data"), +# split_node: Node.mk_split(:elem_id, :pos_child, :ign_child, :neg_child) +# } +# end +# +# test "value/1 for leaf node", %{leaf_node: l} do +# assert Node.value(l) == "leaf_data" +# end +# +# test "value/1 raises for non-leaf node" do +# assert_raise ArgumentError, ~r/Not a leaf node/, fn -> Node.value(Node.mk_true()) end +# +# assert_raise ArgumentError, ~r/Not a leaf node/, fn -> +# Node.value(Node.mk_split(1, 2, 3, 4)) +# end +# end +# +# test "element/1 for split node", %{split_node: s} do +# assert Node.element(s) == :elem_id +# end +# +# test "element/1 raises for non-split node" do +# assert_raise ArgumentError, ~r/Not a split node/, fn -> Node.element(Node.mk_true()) end +# assert_raise ArgumentError, ~r/Not a split node/, fn -> Node.element(Node.mk_leaf(1)) end +# end +# +# test 
"positive_child/1 for split node", %{split_node: s} do +# assert Node.positive_child(s) == :pos_child +# end +# +# test "positive_child/1 raises for non-split node" do +# assert_raise ArgumentError, ~r/Not a split node/, fn -> +# Node.positive_child(Node.mk_leaf(1)) +# end +# end +# +# test "ignore_child/1 for split node", %{split_node: s} do +# assert Node.ignore_child(s) == :ign_child +# end +# +# test "ignore_child/1 raises for non-split node" do +# assert_raise ArgumentError, ~r/Not a split node/, fn -> +# Node.ignore_child(Node.mk_leaf(1)) +# end +# end +# +# test "negative_child/1 for split node", %{split_node: s} do +# assert Node.negative_child(s) == :neg_child +# end +# +# test "negative_child/1 raises for non-split node" do +# assert_raise ArgumentError, ~r/Not a split node/, fn -> +# Node.negative_child(Node.mk_leaf(1)) +# end +# end +# end +# end diff --git a/test/tilly/bdd/ops_test.exs b/test/tilly/bdd/ops_test.exs index 54855f6..cc9cb93 100644 --- a/test/tilly/bdd/ops_test.exs +++ b/test/tilly/bdd/ops_test.exs @@ -1,191 +1,191 @@ -defmodule Tilly.BDD.OpsTest do - use ExUnit.Case, async: true - - alias Tilly.BDD - alias Tilly.BDD.Node - alias Tilly.BDD.Ops - alias Tilly.BDD.IntegerBoolOps # Using a concrete ops_module for testing - - setup do - typing_ctx = BDD.init_bdd_store(%{}) - # Pre-intern some common elements for tests if needed, e.g., integers - # For now, rely on ops to intern elements as they are used. 
- %{initial_ctx: typing_ctx} - end - - describe "leaf/3" do - test "interning an empty leaf value returns predefined false_id", %{initial_ctx: ctx} do - {new_ctx, node_id} = Ops.leaf(ctx, false, IntegerBoolOps) - assert node_id == BDD.false_node_id() - assert new_ctx.bdd_store.ops_cache == ctx.bdd_store.ops_cache # Cache not used for this path - end - - test "interning a full leaf value returns predefined true_id", %{initial_ctx: ctx} do - {new_ctx, node_id} = Ops.leaf(ctx, true, IntegerBoolOps) - assert node_id == BDD.true_node_id() - assert new_ctx.bdd_store.ops_cache == ctx.bdd_store.ops_cache - end - - @tag :skip - test "interning a new 'other' leaf value returns a new ID", %{initial_ctx: _ctx} do - # Assuming IntegerBoolOps.test_leaf_value/1 would return :other for non-booleans - # For this test, we'd need an ops_module where e.g. an integer is an :other leaf. - # Let's simulate with a mock or by extending IntegerBoolOps if it were not read-only. - # For now, this test is conceptual for boolean leaves. - # If IntegerBoolOps was extended: - # defmodule MockIntegerOps do - # defdelegate compare_elements(e1, e2), to: IntegerBoolOps - # defdelegate equal_element?(e1, e2), to: IntegerBoolOps - # # ... other delegates - # def test_leaf_value(10), do: :other # Treat 10 as a specific leaf - # def test_leaf_value(true), do: :full - # def test_leaf_value(false), do: :empty - # end - # {ctx_after_intern, node_id} = Ops.leaf(ctx, 10, MockIntegerOps) - # assert node_id != BDD.true_node_id() and node_id != BDD.false_node_id() - # assert BDD.get_node_data(ctx_after_intern, node_id).structure == Node.mk_leaf(10) - # Placeholder for more complex leaf types. Test is skipped. 
- end - end - - describe "split/6 basic simplifications" do - test "if i_id is true, returns true_id", %{initial_ctx: ctx} do - {_p_ctx, p_id} = Ops.leaf(ctx, false, IntegerBoolOps) # dummy - {_n_ctx, n_id} = Ops.leaf(ctx, false, IntegerBoolOps) # dummy - true_id = BDD.true_node_id() - - {new_ctx, result_id} = Ops.split(ctx, 10, p_id, true_id, n_id, IntegerBoolOps) - assert result_id == true_id - assert new_ctx == ctx # No new nodes or cache entries expected for this rule - end - - test "if p_id == n_id and p_id == i_id, returns p_id", %{initial_ctx: ctx} do - {ctx, p_id} = BDD.get_or_intern_node(ctx, Node.mk_leaf(false), IntegerBoolOps) # some leaf - i_id = p_id - n_id = p_id - - {_new_ctx, result_id} = Ops.split(ctx, 10, p_id, i_id, n_id, IntegerBoolOps) - assert result_id == p_id - # Cache might be touched if union_bdds was called, but this rule is direct. - # For p_id == i_id, it's direct. - end - - test "if p_id == n_id and p_id != i_id, returns union(p_id, i_id)", %{initial_ctx: ctx} do - {ctx, p_id} = BDD.get_or_intern_node(ctx, Node.mk_leaf(false), IntegerBoolOps) - {ctx, i_id} = BDD.get_or_intern_node(ctx, Node.mk_leaf(true), IntegerBoolOps) # different leaf - n_id = p_id - - # Expected union of p_id (false_leaf) and i_id (true_leaf) is true_id - # This relies on union_bdds working. 
- {_new_ctx, result_id} = Ops.split(ctx, 10, p_id, i_id, n_id, IntegerBoolOps) - expected_union_id = BDD.true_node_id() # Union of false_leaf and true_leaf - assert result_id == expected_union_id - end - - test "interns a new split node if no simplification rule applies", %{initial_ctx: ctx} do - {ctx, p_id} = Ops.leaf(ctx, false, IntegerBoolOps) # false_node_id - {ctx, i_id} = Ops.leaf(ctx, false, IntegerBoolOps) # false_node_id - {ctx, n_id} = Ops.leaf(ctx, true, IntegerBoolOps) # true_node_id (different from p_id) - - element = 20 - {new_ctx, split_node_id} = Ops.split(ctx, element, p_id, i_id, n_id, IntegerBoolOps) - - assert split_node_id != p_id and split_node_id != i_id and split_node_id != n_id - assert split_node_id != BDD.true_node_id() and split_node_id != BDD.false_node_id() - - node_data = BDD.get_node_data(new_ctx, split_node_id) - assert node_data.structure == Node.mk_split(element, p_id, i_id, n_id) - assert node_data.ops_module == IntegerBoolOps - assert new_ctx.bdd_store.next_node_id > ctx.bdd_store.next_node_id - end - end - - describe "union_bdds/3" do - test "A U A = A", %{initial_ctx: ctx} do - {ctx, a_id} = Ops.leaf(ctx, false, IntegerBoolOps) # false_node_id - {new_ctx, result_id} = Ops.union_bdds(ctx, a_id, a_id) - assert result_id == a_id - assert Map.has_key?(new_ctx.bdd_store.ops_cache, {:union, a_id, a_id}) - end - - test "A U True = True", %{initial_ctx: ctx} do - {ctx, a_id} = Ops.leaf(ctx, false, IntegerBoolOps) - true_id = BDD.true_node_id() - {_new_ctx, result_id} = Ops.union_bdds(ctx, a_id, true_id) - assert result_id == true_id - end - - test "A U False = A", %{initial_ctx: ctx} do - {ctx, a_id} = Ops.leaf(ctx, true, IntegerBoolOps) # true_node_id - false_id = BDD.false_node_id() - {_new_ctx, result_id} = Ops.union_bdds(ctx, a_id, false_id) - assert result_id == a_id - end - - test "union of two distinct leaves", %{initial_ctx: ctx} do - # leaf(false) U leaf(true) = leaf(true OR false) = leaf(true) -> true_node_id - {ctx, 
leaf_false_id} = Ops.leaf(ctx, false, IntegerBoolOps) - {ctx, leaf_true_id} = Ops.leaf(ctx, true, IntegerBoolOps) # This is BDD.true_node_id() - - {_new_ctx, result_id} = Ops.union_bdds(ctx, leaf_false_id, leaf_true_id) - assert result_id == BDD.true_node_id() - end - - test "union of two simple split nodes with same element", %{initial_ctx: ctx} do - # BDD1: split(10, True, False, False) - # BDD2: split(10, False, True, False) - # Union: split(10, True U False, False U True, False U False) - # = split(10, True, True, False) - - true_id = BDD.true_node_id() - false_id = BDD.false_node_id() - - {ctx, bdd1_id} = Ops.split(ctx, 10, true_id, false_id, false_id, IntegerBoolOps) - {ctx, bdd2_id} = Ops.split(ctx, 10, false_id, true_id, false_id, IntegerBoolOps) - - {final_ctx, union_id} = Ops.union_bdds(ctx, bdd1_id, bdd2_id) - - # Expected structure - {_final_ctx, expected_bdd_id} = Ops.split(final_ctx, 10, true_id, true_id, false_id, IntegerBoolOps) - assert union_id == expected_bdd_id - end - - test "union of two simple split nodes with different elements (x1 < x2)", %{initial_ctx: ctx} do - # BDD1: split(10, True, False, False) - # BDD2: split(20, False, True, False) - # Union (x1 < x2): split(10, p1, i1 U BDD2, n1) - # = split(10, True, False U BDD2, False) - # = split(10, True, BDD2, False) - - {ctx, bdd1_p1_id} = Ops.leaf(ctx, true, IntegerBoolOps) - {ctx, bdd1_i1_id} = Ops.leaf(ctx, false, IntegerBoolOps) - {ctx, bdd1_n1_id} = Ops.leaf(ctx, false, IntegerBoolOps) - {ctx, bdd1_id} = Ops.split(ctx, 10, bdd1_p1_id, bdd1_i1_id, bdd1_n1_id, IntegerBoolOps) - - {ctx, bdd2_p2_id} = Ops.leaf(ctx, false, IntegerBoolOps) - {ctx, bdd2_i2_id} = Ops.leaf(ctx, true, IntegerBoolOps) - {ctx, bdd2_n2_id} = Ops.leaf(ctx, false, IntegerBoolOps) - {ctx, bdd2_id} = Ops.split(ctx, 20, bdd2_p2_id, bdd2_i2_id, bdd2_n2_id, IntegerBoolOps) - - {final_ctx, union_id} = Ops.union_bdds(ctx, bdd1_id, bdd2_id) - - # Expected structure: split(10, True, BDD2, False) - {_final_ctx, expected_bdd_id} 
= Ops.split(final_ctx, 10, bdd1_p1_id, bdd2_id, bdd1_n1_id, IntegerBoolOps) - assert union_id == expected_bdd_id - end - - test "uses cache for repeated union operations", %{initial_ctx: ctx} do - {ctx, a_id} = Ops.leaf(ctx, false, IntegerBoolOps) - {ctx, b_id} = Ops.leaf(ctx, true, IntegerBoolOps) - - {ctx_after_first_union, _result1_id} = Ops.union_bdds(ctx, a_id, b_id) - cache_after_first = ctx_after_first_union.bdd_store.ops_cache - - {ctx_after_second_union, _result2_id} = Ops.union_bdds(ctx_after_first_union, a_id, b_id) - # The BDD store itself (nodes, next_id) should not change on a cache hit. - # The ops_cache map reference will be the same if the result was cached. - assert ctx_after_second_union.bdd_store.ops_cache == cache_after_first - assert ctx_after_second_union.bdd_store.next_node_id == ctx_after_first_union.bdd_store.next_node_id - end - end -end +# defmodule Tilly.BDD.OpsTest do +# use ExUnit.Case, async: true +# +# alias Tilly.BDD +# alias Tilly.BDD.Node +# alias Tilly.BDD.Ops +# alias Tilly.BDD.IntegerBoolOps # Using a concrete ops_module for testing +# +# setup do +# typing_ctx = BDD.init_bdd_store(%{}) +# # Pre-intern some common elements for tests if needed, e.g., integers +# # For now, rely on ops to intern elements as they are used. 
+# %{initial_ctx: typing_ctx} +# end +# +# describe "leaf/3" do +# test "interning an empty leaf value returns predefined false_id", %{initial_ctx: ctx} do +# {new_ctx, node_id} = Ops.leaf(ctx, false, IntegerBoolOps) +# assert node_id == BDD.false_node_id() +# assert new_ctx.bdd_store.ops_cache == ctx.bdd_store.ops_cache # Cache not used for this path +# end +# +# test "interning a full leaf value returns predefined true_id", %{initial_ctx: ctx} do +# {new_ctx, node_id} = Ops.leaf(ctx, true, IntegerBoolOps) +# assert node_id == BDD.true_node_id() +# assert new_ctx.bdd_store.ops_cache == ctx.bdd_store.ops_cache +# end +# +# @tag :skip +# test "interning a new 'other' leaf value returns a new ID", %{initial_ctx: _ctx} do +# # Assuming IntegerBoolOps.test_leaf_value/1 would return :other for non-booleans +# # For this test, we'd need an ops_module where e.g. an integer is an :other leaf. +# # Let's simulate with a mock or by extending IntegerBoolOps if it were not read-only. +# # For now, this test is conceptual for boolean leaves. +# # If IntegerBoolOps was extended: +# # defmodule MockIntegerOps do +# # defdelegate compare_elements(e1, e2), to: IntegerBoolOps +# # defdelegate equal_element?(e1, e2), to: IntegerBoolOps +# # # ... other delegates +# # def test_leaf_value(10), do: :other # Treat 10 as a specific leaf +# # def test_leaf_value(true), do: :full +# # def test_leaf_value(false), do: :empty +# # end +# # {ctx_after_intern, node_id} = Ops.leaf(ctx, 10, MockIntegerOps) +# # assert node_id != BDD.true_node_id() and node_id != BDD.false_node_id() +# # assert BDD.get_node_data(ctx_after_intern, node_id).structure == Node.mk_leaf(10) +# # Placeholder for more complex leaf types. Test is skipped. 
+# end +# end +# +# describe "split/6 basic simplifications" do +# test "if i_id is true, returns true_id", %{initial_ctx: ctx} do +# {_p_ctx, p_id} = Ops.leaf(ctx, false, IntegerBoolOps) # dummy +# {_n_ctx, n_id} = Ops.leaf(ctx, false, IntegerBoolOps) # dummy +# true_id = BDD.true_node_id() +# +# {new_ctx, result_id} = Ops.split(ctx, 10, p_id, true_id, n_id, IntegerBoolOps) +# assert result_id == true_id +# assert new_ctx == ctx # No new nodes or cache entries expected for this rule +# end +# +# test "if p_id == n_id and p_id == i_id, returns p_id", %{initial_ctx: ctx} do +# {ctx, p_id} = BDD.get_or_intern_node(ctx, Node.mk_leaf(false), IntegerBoolOps) # some leaf +# i_id = p_id +# n_id = p_id +# +# {_new_ctx, result_id} = Ops.split(ctx, 10, p_id, i_id, n_id, IntegerBoolOps) +# assert result_id == p_id +# # Cache might be touched if union_bdds was called, but this rule is direct. +# # For p_id == i_id, it's direct. +# end +# +# test "if p_id == n_id and p_id != i_id, returns union(p_id, i_id)", %{initial_ctx: ctx} do +# {ctx, p_id} = BDD.get_or_intern_node(ctx, Node.mk_leaf(false), IntegerBoolOps) +# {ctx, i_id} = BDD.get_or_intern_node(ctx, Node.mk_leaf(true), IntegerBoolOps) # different leaf +# n_id = p_id +# +# # Expected union of p_id (false_leaf) and i_id (true_leaf) is true_id +# # This relies on union_bdds working. 
+# {_new_ctx, result_id} = Ops.split(ctx, 10, p_id, i_id, n_id, IntegerBoolOps) +# expected_union_id = BDD.true_node_id() # Union of false_leaf and true_leaf +# assert result_id == expected_union_id +# end +# +# test "interns a new split node if no simplification rule applies", %{initial_ctx: ctx} do +# {ctx, p_id} = Ops.leaf(ctx, false, IntegerBoolOps) # false_node_id +# {ctx, i_id} = Ops.leaf(ctx, false, IntegerBoolOps) # false_node_id +# {ctx, n_id} = Ops.leaf(ctx, true, IntegerBoolOps) # true_node_id (different from p_id) +# +# element = 20 +# {new_ctx, split_node_id} = Ops.split(ctx, element, p_id, i_id, n_id, IntegerBoolOps) +# +# assert split_node_id != p_id and split_node_id != i_id and split_node_id != n_id +# assert split_node_id != BDD.true_node_id() and split_node_id != BDD.false_node_id() +# +# node_data = BDD.get_node_data(new_ctx, split_node_id) +# assert node_data.structure == Node.mk_split(element, p_id, i_id, n_id) +# assert node_data.ops_module == IntegerBoolOps +# assert new_ctx.bdd_store.next_node_id > ctx.bdd_store.next_node_id +# end +# end +# +# describe "union_bdds/3" do +# test "A U A = A", %{initial_ctx: ctx} do +# {ctx, a_id} = Ops.leaf(ctx, false, IntegerBoolOps) # false_node_id +# {new_ctx, result_id} = Ops.union_bdds(ctx, a_id, a_id) +# assert result_id == a_id +# assert Map.has_key?(new_ctx.bdd_store.ops_cache, {:union, a_id, a_id}) +# end +# +# test "A U True = True", %{initial_ctx: ctx} do +# {ctx, a_id} = Ops.leaf(ctx, false, IntegerBoolOps) +# true_id = BDD.true_node_id() +# {_new_ctx, result_id} = Ops.union_bdds(ctx, a_id, true_id) +# assert result_id == true_id +# end +# +# test "A U False = A", %{initial_ctx: ctx} do +# {ctx, a_id} = Ops.leaf(ctx, true, IntegerBoolOps) # true_node_id +# false_id = BDD.false_node_id() +# {_new_ctx, result_id} = Ops.union_bdds(ctx, a_id, false_id) +# assert result_id == a_id +# end +# +# test "union of two distinct leaves", %{initial_ctx: ctx} do +# # leaf(false) U leaf(true) = leaf(true OR 
false) = leaf(true) -> true_node_id +# {ctx, leaf_false_id} = Ops.leaf(ctx, false, IntegerBoolOps) +# {ctx, leaf_true_id} = Ops.leaf(ctx, true, IntegerBoolOps) # This is BDD.true_node_id() +# +# {_new_ctx, result_id} = Ops.union_bdds(ctx, leaf_false_id, leaf_true_id) +# assert result_id == BDD.true_node_id() +# end +# +# test "union of two simple split nodes with same element", %{initial_ctx: ctx} do +# # BDD1: split(10, True, False, False) +# # BDD2: split(10, False, True, False) +# # Union: split(10, True U False, False U True, False U False) +# # = split(10, True, True, False) +# +# true_id = BDD.true_node_id() +# false_id = BDD.false_node_id() +# +# {ctx, bdd1_id} = Ops.split(ctx, 10, true_id, false_id, false_id, IntegerBoolOps) +# {ctx, bdd2_id} = Ops.split(ctx, 10, false_id, true_id, false_id, IntegerBoolOps) +# +# {final_ctx, union_id} = Ops.union_bdds(ctx, bdd1_id, bdd2_id) +# +# # Expected structure +# {_final_ctx, expected_bdd_id} = Ops.split(final_ctx, 10, true_id, true_id, false_id, IntegerBoolOps) +# assert union_id == expected_bdd_id +# end +# +# test "union of two simple split nodes with different elements (x1 < x2)", %{initial_ctx: ctx} do +# # BDD1: split(10, True, False, False) +# # BDD2: split(20, False, True, False) +# # Union (x1 < x2): split(10, p1, i1 U BDD2, n1) +# # = split(10, True, False U BDD2, False) +# # = split(10, True, BDD2, False) +# +# {ctx, bdd1_p1_id} = Ops.leaf(ctx, true, IntegerBoolOps) +# {ctx, bdd1_i1_id} = Ops.leaf(ctx, false, IntegerBoolOps) +# {ctx, bdd1_n1_id} = Ops.leaf(ctx, false, IntegerBoolOps) +# {ctx, bdd1_id} = Ops.split(ctx, 10, bdd1_p1_id, bdd1_i1_id, bdd1_n1_id, IntegerBoolOps) +# +# {ctx, bdd2_p2_id} = Ops.leaf(ctx, false, IntegerBoolOps) +# {ctx, bdd2_i2_id} = Ops.leaf(ctx, true, IntegerBoolOps) +# {ctx, bdd2_n2_id} = Ops.leaf(ctx, false, IntegerBoolOps) +# {ctx, bdd2_id} = Ops.split(ctx, 20, bdd2_p2_id, bdd2_i2_id, bdd2_n2_id, IntegerBoolOps) +# +# {final_ctx, union_id} = Ops.union_bdds(ctx, bdd1_id, 
bdd2_id) +# +# # Expected structure: split(10, True, BDD2, False) +# {_final_ctx, expected_bdd_id} = Ops.split(final_ctx, 10, bdd1_p1_id, bdd2_id, bdd1_n1_id, IntegerBoolOps) +# assert union_id == expected_bdd_id +# end +# +# test "uses cache for repeated union operations", %{initial_ctx: ctx} do +# {ctx, a_id} = Ops.leaf(ctx, false, IntegerBoolOps) +# {ctx, b_id} = Ops.leaf(ctx, true, IntegerBoolOps) +# +# {ctx_after_first_union, _result1_id} = Ops.union_bdds(ctx, a_id, b_id) +# cache_after_first = ctx_after_first_union.bdd_store.ops_cache +# +# {ctx_after_second_union, _result2_id} = Ops.union_bdds(ctx_after_first_union, a_id, b_id) +# # The BDD store itself (nodes, next_id) should not change on a cache hit. +# # The ops_cache map reference will be the same if the result was cached. +# assert ctx_after_second_union.bdd_store.ops_cache == cache_after_first +# assert ctx_after_second_union.bdd_store.next_node_id == ctx_after_first_union.bdd_store.next_node_id +# end +# end +# end diff --git a/test/tilly/bdd/string_bool_ops_test.exs b/test/tilly/bdd/string_bool_ops_test.exs index 041ceb8..aeb83d5 100644 --- a/test/tilly/bdd/string_bool_ops_test.exs +++ b/test/tilly/bdd/string_bool_ops_test.exs @@ -1,72 +1,72 @@ -defmodule Tilly.BDD.StringBoolOpsTest do - use ExUnit.Case, async: true - - alias Tilly.BDD.StringBoolOps - - describe "compare_elements/2" do - test "correctly compares strings" do - assert StringBoolOps.compare_elements("apple", "banana") == :lt - assert StringBoolOps.compare_elements("banana", "apple") == :gt - assert StringBoolOps.compare_elements("cherry", "cherry") == :eq - end - end - - describe "equal_element?/2" do - test "correctly checks string equality" do - assert StringBoolOps.equal_element?("apple", "apple") == true - assert StringBoolOps.equal_element?("apple", "banana") == false - end - end - - describe "hash_element/1" do - test "hashes strings consistently" do - assert is_integer(StringBoolOps.hash_element("foo")) - assert 
StringBoolOps.hash_element("foo") == StringBoolOps.hash_element("foo") - assert StringBoolOps.hash_element("foo") != StringBoolOps.hash_element("bar") - end - end - - describe "leaf operations" do - test "empty_leaf/0 returns false" do - assert StringBoolOps.empty_leaf() == false - end - - test "any_leaf/0 returns true" do - assert StringBoolOps.any_leaf() == true - end - - test "is_empty_leaf?/1" do - assert StringBoolOps.is_empty_leaf?(false) == true - assert StringBoolOps.is_empty_leaf?(true) == false - end - - test "union_leaves/3" do - assert StringBoolOps.union_leaves(%{}, false, false) == false - assert StringBoolOps.union_leaves(%{}, true, false) == true - assert StringBoolOps.union_leaves(%{}, false, true) == true - assert StringBoolOps.union_leaves(%{}, true, true) == true - end - - test "intersection_leaves/3" do - assert StringBoolOps.intersection_leaves(%{}, false, false) == false - assert StringBoolOps.intersection_leaves(%{}, true, false) == false - assert StringBoolOps.intersection_leaves(%{}, false, true) == false - assert StringBoolOps.intersection_leaves(%{}, true, true) == true - end - - test "negation_leaf/2" do - assert StringBoolOps.negation_leaf(%{}, false) == true - assert StringBoolOps.negation_leaf(%{}, true) == false - end - end - - describe "test_leaf_value/1" do - test "returns :empty for false" do - assert StringBoolOps.test_leaf_value(false) == :empty - end - - test "returns :full for true" do - assert StringBoolOps.test_leaf_value(true) == :full - end - end -end +# defmodule Tilly.BDD.StringBoolOpsTest do +# use ExUnit.Case, async: true +# +# alias Tilly.BDD.StringBoolOps +# +# describe "compare_elements/2" do +# test "correctly compares strings" do +# assert StringBoolOps.compare_elements("apple", "banana") == :lt +# assert StringBoolOps.compare_elements("banana", "apple") == :gt +# assert StringBoolOps.compare_elements("cherry", "cherry") == :eq +# end +# end +# +# describe "equal_element?/2" do +# test "correctly checks string 
equality" do +# assert StringBoolOps.equal_element?("apple", "apple") == true +# assert StringBoolOps.equal_element?("apple", "banana") == false +# end +# end +# +# describe "hash_element/1" do +# test "hashes strings consistently" do +# assert is_integer(StringBoolOps.hash_element("foo")) +# assert StringBoolOps.hash_element("foo") == StringBoolOps.hash_element("foo") +# assert StringBoolOps.hash_element("foo") != StringBoolOps.hash_element("bar") +# end +# end +# +# describe "leaf operations" do +# test "empty_leaf/0 returns false" do +# assert StringBoolOps.empty_leaf() == false +# end +# +# test "any_leaf/0 returns true" do +# assert StringBoolOps.any_leaf() == true +# end +# +# test "is_empty_leaf?/1" do +# assert StringBoolOps.is_empty_leaf?(false) == true +# assert StringBoolOps.is_empty_leaf?(true) == false +# end +# +# test "union_leaves/3" do +# assert StringBoolOps.union_leaves(%{}, false, false) == false +# assert StringBoolOps.union_leaves(%{}, true, false) == true +# assert StringBoolOps.union_leaves(%{}, false, true) == true +# assert StringBoolOps.union_leaves(%{}, true, true) == true +# end +# +# test "intersection_leaves/3" do +# assert StringBoolOps.intersection_leaves(%{}, false, false) == false +# assert StringBoolOps.intersection_leaves(%{}, true, false) == false +# assert StringBoolOps.intersection_leaves(%{}, false, true) == false +# assert StringBoolOps.intersection_leaves(%{}, true, true) == true +# end +# +# test "negation_leaf/2" do +# assert StringBoolOps.negation_leaf(%{}, false) == true +# assert StringBoolOps.negation_leaf(%{}, true) == false +# end +# end +# +# describe "test_leaf_value/1" do +# test "returns :empty for false" do +# assert StringBoolOps.test_leaf_value(false) == :empty +# end +# +# test "returns :full for true" do +# assert StringBoolOps.test_leaf_value(true) == :full +# end +# end +# end diff --git a/test/tilly/bdd_test.exs b/test/tilly/bdd_test.exs index d36acc8..1f2a979 100644 --- a/test/tilly/bdd_test.exs +++ 
b/test/tilly/bdd_test.exs @@ -1,163 +1,163 @@ -defmodule Tilly.BDDTest do - use ExUnit.Case, async: true - - alias Tilly.BDD.Node - - describe "init_bdd_store/1" do - test "initializes bdd_store in typing_ctx with predefined false and true nodes" do - typing_ctx = %{} - new_ctx = Tilly.BDD.init_bdd_store(typing_ctx) - - assert %{bdd_store: bdd_store} = new_ctx - assert is_map(bdd_store.nodes_by_structure) - assert is_map(bdd_store.structures_by_id) - assert bdd_store.next_node_id == 2 # 0 for false, 1 for true - assert bdd_store.ops_cache == %{} - - # Check false node - false_id = Tilly.BDD.false_node_id() - false_ops_module = Tilly.BDD.universal_ops_module() - assert bdd_store.nodes_by_structure[{Node.mk_false(), false_ops_module}] == false_id - assert bdd_store.structures_by_id[false_id] == %{structure: Node.mk_false(), ops_module: false_ops_module} - - # Check true node - true_id = Tilly.BDD.true_node_id() - true_ops_module = Tilly.BDD.universal_ops_module() - assert bdd_store.nodes_by_structure[{Node.mk_true(), true_ops_module}] == true_id - assert bdd_store.structures_by_id[true_id] == %{structure: Node.mk_true(), ops_module: true_ops_module} - end - end - - describe "get_or_intern_node/3" do - setup do - typing_ctx = Tilly.BDD.init_bdd_store(%{}) - %{initial_ctx: typing_ctx} - end - - test "interning Node.mk_false() returns predefined false_id and doesn't change store", %{initial_ctx: ctx} do - false_ops_module = Tilly.BDD.universal_ops_module() - {new_ctx, node_id} = Tilly.BDD.get_or_intern_node(ctx, Node.mk_false(), false_ops_module) - assert node_id == Tilly.BDD.false_node_id() - assert new_ctx.bdd_store == ctx.bdd_store - end - - test "interning Node.mk_true() returns predefined true_id and doesn't change store", %{initial_ctx: ctx} do - true_ops_module = Tilly.BDD.universal_ops_module() - {new_ctx, node_id} = Tilly.BDD.get_or_intern_node(ctx, Node.mk_true(), true_ops_module) - assert node_id == Tilly.BDD.true_node_id() - assert new_ctx.bdd_store == 
ctx.bdd_store - end - - test "interning a new leaf node returns a new ID and updates the store", %{initial_ctx: ctx} do - leaf_structure = Node.mk_leaf("test_leaf") - ops_mod = :my_ops - - {ctx_after_intern, node_id} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod) - - assert node_id == 2 # Initial next_node_id - assert ctx_after_intern.bdd_store.next_node_id == 3 - assert ctx_after_intern.bdd_store.nodes_by_structure[{leaf_structure, ops_mod}] == node_id - assert ctx_after_intern.bdd_store.structures_by_id[node_id] == %{structure: leaf_structure, ops_module: ops_mod} - end - - test "interning the same leaf node again returns the same ID and doesn't change store", %{initial_ctx: ctx} do - leaf_structure = Node.mk_leaf("test_leaf") - ops_mod = :my_ops - - {ctx_after_first_intern, first_node_id} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod) - {ctx_after_second_intern, second_node_id} = Tilly.BDD.get_or_intern_node(ctx_after_first_intern, leaf_structure, ops_mod) - - assert first_node_id == second_node_id - assert ctx_after_first_intern.bdd_store == ctx_after_second_intern.bdd_store - end - - test "interning a new split node returns a new ID and updates the store", %{initial_ctx: ctx} do - split_structure = Node.mk_split(:el, Tilly.BDD.true_node_id(), Tilly.BDD.false_node_id(), Tilly.BDD.true_node_id()) - ops_mod = :split_ops - - {ctx_after_intern, node_id} = Tilly.BDD.get_or_intern_node(ctx, split_structure, ops_mod) - - assert node_id == 2 # Initial next_node_id - assert ctx_after_intern.bdd_store.next_node_id == 3 - assert ctx_after_intern.bdd_store.nodes_by_structure[{split_structure, ops_mod}] == node_id - assert ctx_after_intern.bdd_store.structures_by_id[node_id] == %{structure: split_structure, ops_module: ops_mod} - end - - test "interning structurally identical nodes with different ops_modules results in different IDs", %{initial_ctx: ctx} do - leaf_structure = Node.mk_leaf("shared_leaf") - ops_mod1 = :ops1 - ops_mod2 = :ops2 - - 
{ctx1, id1} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod1) - {_ctx2, id2} = Tilly.BDD.get_or_intern_node(ctx1, leaf_structure, ops_mod2) - - assert id1 != id2 - assert id1 == 2 - assert id2 == 3 - end - - test "raises ArgumentError if bdd_store is not initialized" do - assert_raise ArgumentError, ~r/BDD store not initialized/, fn -> - Tilly.BDD.get_or_intern_node(%{}, Node.mk_leaf("foo"), :ops) - end - end - end - - describe "get_node_data/2" do - setup do - ctx = Tilly.BDD.init_bdd_store(%{}) - leaf_structure = Node.mk_leaf("data") - ops_mod = :leaf_ops - {new_ctx, leaf_id_val} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod) - %{ctx: new_ctx, leaf_structure: leaf_structure, ops_mod: ops_mod, leaf_id: leaf_id_val} - end - - test "returns correct data for false node", %{ctx: ctx} do - false_id = Tilly.BDD.false_node_id() - false_ops_module = Tilly.BDD.universal_ops_module() - assert Tilly.BDD.get_node_data(ctx, false_id) == %{structure: Node.mk_false(), ops_module: false_ops_module} - end - - test "returns correct data for true node", %{ctx: ctx} do - true_id = Tilly.BDD.true_node_id() - true_ops_module = Tilly.BDD.universal_ops_module() - assert Tilly.BDD.get_node_data(ctx, true_id) == %{structure: Node.mk_true(), ops_module: true_ops_module} - end - - test "returns correct data for a custom interned leaf node", %{ctx: ctx, leaf_structure: ls, ops_mod: om, leaf_id: id} do - assert Tilly.BDD.get_node_data(ctx, id) == %{structure: ls, ops_module: om} - end - - test "returns nil for an unknown node ID", %{ctx: ctx} do - assert Tilly.BDD.get_node_data(ctx, 999) == nil - end - - test "returns nil if bdd_store not in ctx" do - assert Tilly.BDD.get_node_data(%{}, 0) == nil - end - end - - describe "is_false_node?/2 and is_true_node?/2" do - setup do - ctx = Tilly.BDD.init_bdd_store(%{}) - leaf_structure = Node.mk_leaf("data") - ops_mod = :leaf_ops - {new_ctx, leaf_id_val} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod) - %{ctx: 
new_ctx, leaf_id: leaf_id_val} - end - - test "is_false_node?/2", %{ctx: ctx, leaf_id: id} do - assert Tilly.BDD.is_false_node?(ctx, Tilly.BDD.false_node_id()) == true - assert Tilly.BDD.is_false_node?(ctx, Tilly.BDD.true_node_id()) == false - assert Tilly.BDD.is_false_node?(ctx, id) == false - assert Tilly.BDD.is_false_node?(ctx, 999) == false # Unknown ID - end - - test "is_true_node?/2", %{ctx: ctx, leaf_id: id} do - assert Tilly.BDD.is_true_node?(ctx, Tilly.BDD.true_node_id()) == true - assert Tilly.BDD.is_true_node?(ctx, Tilly.BDD.false_node_id()) == false - assert Tilly.BDD.is_true_node?(ctx, id) == false - assert Tilly.BDD.is_true_node?(ctx, 999) == false # Unknown ID - end - end -end +# defmodule Tilly.BDDTest do +# use ExUnit.Case, async: true +# +# alias Tilly.BDD.Node +# +# describe "init_bdd_store/1" do +# test "initializes bdd_store in typing_ctx with predefined false and true nodes" do +# typing_ctx = %{} +# new_ctx = Tilly.BDD.init_bdd_store(typing_ctx) +# +# assert %{bdd_store: bdd_store} = new_ctx +# assert is_map(bdd_store.nodes_by_structure) +# assert is_map(bdd_store.structures_by_id) +# assert bdd_store.next_node_id == 2 # 0 for false, 1 for true +# assert bdd_store.ops_cache == %{} +# +# # Check false node +# false_id = Tilly.BDD.false_node_id() +# false_ops_module = Tilly.BDD.universal_ops_module() +# assert bdd_store.nodes_by_structure[{Node.mk_false(), false_ops_module}] == false_id +# assert bdd_store.structures_by_id[false_id] == %{structure: Node.mk_false(), ops_module: false_ops_module} +# +# # Check true node +# true_id = Tilly.BDD.true_node_id() +# true_ops_module = Tilly.BDD.universal_ops_module() +# assert bdd_store.nodes_by_structure[{Node.mk_true(), true_ops_module}] == true_id +# assert bdd_store.structures_by_id[true_id] == %{structure: Node.mk_true(), ops_module: true_ops_module} +# end +# end +# +# describe "get_or_intern_node/3" do +# setup do +# typing_ctx = Tilly.BDD.init_bdd_store(%{}) +# %{initial_ctx: typing_ctx} +# end 
+# +# test "interning Node.mk_false() returns predefined false_id and doesn't change store", %{initial_ctx: ctx} do +# false_ops_module = Tilly.BDD.universal_ops_module() +# {new_ctx, node_id} = Tilly.BDD.get_or_intern_node(ctx, Node.mk_false(), false_ops_module) +# assert node_id == Tilly.BDD.false_node_id() +# assert new_ctx.bdd_store == ctx.bdd_store +# end +# +# test "interning Node.mk_true() returns predefined true_id and doesn't change store", %{initial_ctx: ctx} do +# true_ops_module = Tilly.BDD.universal_ops_module() +# {new_ctx, node_id} = Tilly.BDD.get_or_intern_node(ctx, Node.mk_true(), true_ops_module) +# assert node_id == Tilly.BDD.true_node_id() +# assert new_ctx.bdd_store == ctx.bdd_store +# end +# +# test "interning a new leaf node returns a new ID and updates the store", %{initial_ctx: ctx} do +# leaf_structure = Node.mk_leaf("test_leaf") +# ops_mod = :my_ops +# +# {ctx_after_intern, node_id} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod) +# +# assert node_id == 2 # Initial next_node_id +# assert ctx_after_intern.bdd_store.next_node_id == 3 +# assert ctx_after_intern.bdd_store.nodes_by_structure[{leaf_structure, ops_mod}] == node_id +# assert ctx_after_intern.bdd_store.structures_by_id[node_id] == %{structure: leaf_structure, ops_module: ops_mod} +# end +# +# test "interning the same leaf node again returns the same ID and doesn't change store", %{initial_ctx: ctx} do +# leaf_structure = Node.mk_leaf("test_leaf") +# ops_mod = :my_ops +# +# {ctx_after_first_intern, first_node_id} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod) +# {ctx_after_second_intern, second_node_id} = Tilly.BDD.get_or_intern_node(ctx_after_first_intern, leaf_structure, ops_mod) +# +# assert first_node_id == second_node_id +# assert ctx_after_first_intern.bdd_store == ctx_after_second_intern.bdd_store +# end +# +# test "interning a new split node returns a new ID and updates the store", %{initial_ctx: ctx} do +# split_structure = Node.mk_split(:el, 
Tilly.BDD.true_node_id(), Tilly.BDD.false_node_id(), Tilly.BDD.true_node_id()) +# ops_mod = :split_ops +# +# {ctx_after_intern, node_id} = Tilly.BDD.get_or_intern_node(ctx, split_structure, ops_mod) +# +# assert node_id == 2 # Initial next_node_id +# assert ctx_after_intern.bdd_store.next_node_id == 3 +# assert ctx_after_intern.bdd_store.nodes_by_structure[{split_structure, ops_mod}] == node_id +# assert ctx_after_intern.bdd_store.structures_by_id[node_id] == %{structure: split_structure, ops_module: ops_mod} +# end +# +# test "interning structurally identical nodes with different ops_modules results in different IDs", %{initial_ctx: ctx} do +# leaf_structure = Node.mk_leaf("shared_leaf") +# ops_mod1 = :ops1 +# ops_mod2 = :ops2 +# +# {ctx1, id1} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod1) +# {_ctx2, id2} = Tilly.BDD.get_or_intern_node(ctx1, leaf_structure, ops_mod2) +# +# assert id1 != id2 +# assert id1 == 2 +# assert id2 == 3 +# end +# +# test "raises ArgumentError if bdd_store is not initialized" do +# assert_raise ArgumentError, ~r/BDD store not initialized/, fn -> +# Tilly.BDD.get_or_intern_node(%{}, Node.mk_leaf("foo"), :ops) +# end +# end +# end +# +# describe "get_node_data/2" do +# setup do +# ctx = Tilly.BDD.init_bdd_store(%{}) +# leaf_structure = Node.mk_leaf("data") +# ops_mod = :leaf_ops +# {new_ctx, leaf_id_val} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod) +# %{ctx: new_ctx, leaf_structure: leaf_structure, ops_mod: ops_mod, leaf_id: leaf_id_val} +# end +# +# test "returns correct data for false node", %{ctx: ctx} do +# false_id = Tilly.BDD.false_node_id() +# false_ops_module = Tilly.BDD.universal_ops_module() +# assert Tilly.BDD.get_node_data(ctx, false_id) == %{structure: Node.mk_false(), ops_module: false_ops_module} +# end +# +# test "returns correct data for true node", %{ctx: ctx} do +# true_id = Tilly.BDD.true_node_id() +# true_ops_module = Tilly.BDD.universal_ops_module() +# assert Tilly.BDD.get_node_data(ctx, 
true_id) == %{structure: Node.mk_true(), ops_module: true_ops_module} +# end +# +# test "returns correct data for a custom interned leaf node", %{ctx: ctx, leaf_structure: ls, ops_mod: om, leaf_id: id} do +# assert Tilly.BDD.get_node_data(ctx, id) == %{structure: ls, ops_module: om} +# end +# +# test "returns nil for an unknown node ID", %{ctx: ctx} do +# assert Tilly.BDD.get_node_data(ctx, 999) == nil +# end +# +# test "returns nil if bdd_store not in ctx" do +# assert Tilly.BDD.get_node_data(%{}, 0) == nil +# end +# end +# +# describe "is_false_node?/2 and is_true_node?/2" do +# setup do +# ctx = Tilly.BDD.init_bdd_store(%{}) +# leaf_structure = Node.mk_leaf("data") +# ops_mod = :leaf_ops +# {new_ctx, leaf_id_val} = Tilly.BDD.get_or_intern_node(ctx, leaf_structure, ops_mod) +# %{ctx: new_ctx, leaf_id: leaf_id_val} +# end +# +# test "is_false_node?/2", %{ctx: ctx, leaf_id: id} do +# assert Tilly.BDD.is_false_node?(ctx, Tilly.BDD.false_node_id()) == true +# assert Tilly.BDD.is_false_node?(ctx, Tilly.BDD.true_node_id()) == false +# assert Tilly.BDD.is_false_node?(ctx, id) == false +# assert Tilly.BDD.is_false_node?(ctx, 999) == false # Unknown ID +# end +# +# test "is_true_node?/2", %{ctx: ctx, leaf_id: id} do +# assert Tilly.BDD.is_true_node?(ctx, Tilly.BDD.true_node_id()) == true +# assert Tilly.BDD.is_true_node?(ctx, Tilly.BDD.false_node_id()) == false +# assert Tilly.BDD.is_true_node?(ctx, id) == false +# assert Tilly.BDD.is_true_node?(ctx, 999) == false # Unknown ID +# end +# end +# end diff --git a/test/tilly/type/ops_test.exs b/test/tilly/type/ops_test.exs index 21d3664..947a4f1 100644 --- a/test/tilly/type/ops_test.exs +++ b/test/tilly/type/ops_test.exs @@ -1,162 +1,162 @@ -defmodule Tilly.Type.OpsTest do - use ExUnit.Case, async: true - - alias Tilly.BDD - alias Tilly.Type.Store - alias Tilly.Type.Ops - - defp init_context do - %{} - |> BDD.init_bdd_store() - |> Store.init_type_store() - end - - describe "get_type_nothing/1 and get_type_any/1" do - test 
"get_type_nothing returns an interned Descr ID for the empty type" do - ctx = init_context() - {ctx_after_nothing, nothing_id} = Ops.get_type_nothing(ctx) - assert Ops.is_empty_type?(ctx_after_nothing, nothing_id) - end - - test "get_type_any returns an interned Descr ID for the universal type" do - ctx = init_context() - {ctx_after_any, any_id} = Ops.get_type_any(ctx) - refute Ops.is_empty_type?(ctx_after_any, any_id) - - # Further check: any type negated should be nothing type - {ctx1, neg_any_id} = Ops.negation_type(ctx_after_any, any_id) - {ctx2, nothing_id} = Ops.get_type_nothing(ctx1) - assert neg_any_id == nothing_id - end - end - - describe "literal type constructors" do - test "create_atom_literal_type/2" do - ctx = init_context() - {ctx1, atom_foo_id} = Ops.create_atom_literal_type(ctx, :foo) - {ctx2, atom_bar_id} = Ops.create_atom_literal_type(ctx1, :bar) - {ctx3, atom_foo_again_id} = Ops.create_atom_literal_type(ctx2, :foo) - - refute Ops.is_empty_type?(ctx3, atom_foo_id) - refute Ops.is_empty_type?(ctx3, atom_bar_id) - assert atom_foo_id != atom_bar_id - assert atom_foo_id == atom_foo_again_id - - # Test intersection: (:foo & :bar) should be Nothing - {ctx4, intersection_id} = Ops.intersection_types(ctx3, atom_foo_id, atom_bar_id) - assert Ops.is_empty_type?(ctx4, intersection_id) - - # Test union: (:foo | :bar) should not be empty - {ctx5, union_id} = Ops.union_types(ctx4, atom_foo_id, atom_bar_id) - refute Ops.is_empty_type?(ctx5, union_id) - - # Test negation: (not :foo) should not be empty and not be :foo - {ctx6, not_foo_id} = Ops.negation_type(ctx5, atom_foo_id) - refute Ops.is_empty_type?(ctx6, not_foo_id) - {ctx7, intersection_not_foo_and_foo} = Ops.intersection_types(ctx6, atom_foo_id, not_foo_id) - assert Ops.is_empty_type?(ctx7, intersection_not_foo_and_foo) - end - - test "create_integer_literal_type/2" do - ctx = init_context() - {ctx1, int_1_id} = Ops.create_integer_literal_type(ctx, 1) - {ctx2, int_2_id} = 
Ops.create_integer_literal_type(ctx1, 2) - - refute Ops.is_empty_type?(ctx2, int_1_id) # Use ctx2 - {ctx3, intersection_id} = Ops.intersection_types(ctx2, int_1_id, int_2_id) - assert Ops.is_empty_type?(ctx3, intersection_id) - end - - test "create_string_literal_type/2" do - ctx = init_context() - {ctx1, str_a_id} = Ops.create_string_literal_type(ctx, "a") - {ctx2, str_b_id} = Ops.create_string_literal_type(ctx1, "b") - - refute Ops.is_empty_type?(ctx2, str_a_id) # Use ctx2 - {ctx3, intersection_id} = Ops.intersection_types(ctx2, str_a_id, str_b_id) - assert Ops.is_empty_type?(ctx3, intersection_id) - end - end - - describe "primitive type constructors (any_of_kind)" do - test "get_primitive_type_any_atom/1" do - ctx = init_context() - {ctx1, any_atom_id} = Ops.get_primitive_type_any_atom(ctx) - {ctx2, atom_foo_id} = Ops.create_atom_literal_type(ctx1, :foo) - - refute Ops.is_empty_type?(ctx2, any_atom_id) - # :foo should be a subtype of AnyAtom (i.e., :foo INTERSECTION (NEGATION AnyAtom) == Empty) - # Or, :foo UNION AnyAtom == AnyAtom - # Or, :foo INTERSECTION AnyAtom == :foo - {ctx3, intersection_foo_any_atom_id} = Ops.intersection_types(ctx2, atom_foo_id, any_atom_id) - assert intersection_foo_any_atom_id == atom_foo_id # Check it simplifies to :foo - - # Test original subtype logic: (:foo & (not AnyAtom)) == Empty - {ctx4, not_any_atom_id} = Ops.negation_type(ctx3, any_atom_id) # Use ctx3 - {ctx5, intersection_subtype_check_id} = Ops.intersection_types(ctx4, atom_foo_id, not_any_atom_id) - assert Ops.is_empty_type?(ctx5, intersection_subtype_check_id) - - # AnyAtom & AnyInteger should be Empty - {ctx6, any_integer_id} = Ops.get_primitive_type_any_integer(ctx5) # Use ctx5 - {ctx7, atom_int_intersect_id} = Ops.intersection_types(ctx6, any_atom_id, any_integer_id) - assert Ops.is_empty_type?(ctx7, atom_int_intersect_id) - end - end - - describe "union_types, intersection_types, negation_type" do - test "basic set properties" do - ctx0 = init_context() - {ctx1, 
type_a_id} = Ops.create_atom_literal_type(ctx0, :a) - {ctx2, type_b_id} = Ops.create_atom_literal_type(ctx1, :b) - {ctx3, type_c_id} = Ops.create_atom_literal_type(ctx2, :c) - {ctx4, nothing_id} = Ops.get_type_nothing(ctx3) - - # A | Nothing = A - {ctx5, union_a_nothing_id} = Ops.union_types(ctx4, type_a_id, nothing_id) - assert union_a_nothing_id == type_a_id - - # A & Nothing = Nothing - {ctx6, intersect_a_nothing_id} = Ops.intersection_types(ctx5, type_a_id, nothing_id) - assert intersect_a_nothing_id == nothing_id - - # not (not A) = A - {ctx7, not_a_id} = Ops.negation_type(ctx6, type_a_id) - {ctx8, not_not_a_id} = Ops.negation_type(ctx7, not_a_id) - assert not_not_a_id == type_a_id - - # A | B - {ctx9, union_ab_id} = Ops.union_types(ctx8, type_a_id, type_b_id) - # (A | B) & A = A - {ctx10, intersect_union_a_id} = Ops.intersection_types(ctx9, union_ab_id, type_a_id) - assert intersect_union_a_id == type_a_id - - # (A | B) & C = Nothing (if A, B, C are distinct atom literals) - {ctx11, intersect_union_c_id} = Ops.intersection_types(ctx10, union_ab_id, type_c_id) - assert Ops.is_empty_type?(ctx11, intersect_union_c_id) - - # Commutativity and idempotence of union/intersection are implicitly tested by caching - # and canonical key generation in apply_type_op. 
- end - - test "type operations are cached" do - ctx0 = init_context() - {ctx1, type_a_id} = Ops.create_atom_literal_type(ctx0, :a) - {ctx2, type_b_id} = Ops.create_atom_literal_type(ctx1, :b) - - # Perform an operation - {ctx3, union1_id} = Ops.union_types(ctx2, type_a_id, type_b_id) - initial_cache_size = map_size(ctx3.type_store.ops_cache) - assert initial_cache_size > 0 # Ensure something was cached - - # Perform the same operation again - {ctx4, union2_id} = Ops.union_types(ctx3, type_a_id, type_b_id) - assert union1_id == union2_id - assert map_size(ctx4.type_store.ops_cache) == initial_cache_size # Cache size should not change - - # Perform with swapped arguments (commutative) - {ctx5, union3_id} = Ops.union_types(ctx4, type_b_id, type_a_id) - assert union1_id == union3_id - assert map_size(ctx5.type_store.ops_cache) == initial_cache_size # Cache size should not change - end - end -end +# defmodule Tilly.Type.OpsTest do +# use ExUnit.Case, async: true +# +# alias Tilly.BDD +# alias Tilly.Type.Store +# alias Tilly.Type.Ops +# +# defp init_context do +# %{} +# |> BDD.init_bdd_store() +# |> Store.init_type_store() +# end +# +# describe "get_type_nothing/1 and get_type_any/1" do +# test "get_type_nothing returns an interned Descr ID for the empty type" do +# ctx = init_context() +# {ctx_after_nothing, nothing_id} = Ops.get_type_nothing(ctx) +# assert Ops.is_empty_type?(ctx_after_nothing, nothing_id) +# end +# +# test "get_type_any returns an interned Descr ID for the universal type" do +# ctx = init_context() +# {ctx_after_any, any_id} = Ops.get_type_any(ctx) +# refute Ops.is_empty_type?(ctx_after_any, any_id) +# +# # Further check: any type negated should be nothing type +# {ctx1, neg_any_id} = Ops.negation_type(ctx_after_any, any_id) +# {ctx2, nothing_id} = Ops.get_type_nothing(ctx1) +# assert neg_any_id == nothing_id +# end +# end +# +# describe "literal type constructors" do +# test "create_atom_literal_type/2" do +# ctx = init_context() +# {ctx1, 
atom_foo_id} = Ops.create_atom_literal_type(ctx, :foo) +# {ctx2, atom_bar_id} = Ops.create_atom_literal_type(ctx1, :bar) +# {ctx3, atom_foo_again_id} = Ops.create_atom_literal_type(ctx2, :foo) +# +# refute Ops.is_empty_type?(ctx3, atom_foo_id) +# refute Ops.is_empty_type?(ctx3, atom_bar_id) +# assert atom_foo_id != atom_bar_id +# assert atom_foo_id == atom_foo_again_id +# +# # Test intersection: (:foo & :bar) should be Nothing +# {ctx4, intersection_id} = Ops.intersection_types(ctx3, atom_foo_id, atom_bar_id) +# assert Ops.is_empty_type?(ctx4, intersection_id) +# +# # Test union: (:foo | :bar) should not be empty +# {ctx5, union_id} = Ops.union_types(ctx4, atom_foo_id, atom_bar_id) +# refute Ops.is_empty_type?(ctx5, union_id) +# +# # Test negation: (not :foo) should not be empty and not be :foo +# {ctx6, not_foo_id} = Ops.negation_type(ctx5, atom_foo_id) +# refute Ops.is_empty_type?(ctx6, not_foo_id) +# {ctx7, intersection_not_foo_and_foo} = Ops.intersection_types(ctx6, atom_foo_id, not_foo_id) +# assert Ops.is_empty_type?(ctx7, intersection_not_foo_and_foo) +# end +# +# test "create_integer_literal_type/2" do +# ctx = init_context() +# {ctx1, int_1_id} = Ops.create_integer_literal_type(ctx, 1) +# {ctx2, int_2_id} = Ops.create_integer_literal_type(ctx1, 2) +# +# refute Ops.is_empty_type?(ctx2, int_1_id) # Use ctx2 +# {ctx3, intersection_id} = Ops.intersection_types(ctx2, int_1_id, int_2_id) +# assert Ops.is_empty_type?(ctx3, intersection_id) +# end +# +# test "create_string_literal_type/2" do +# ctx = init_context() +# {ctx1, str_a_id} = Ops.create_string_literal_type(ctx, "a") +# {ctx2, str_b_id} = Ops.create_string_literal_type(ctx1, "b") +# +# refute Ops.is_empty_type?(ctx2, str_a_id) # Use ctx2 +# {ctx3, intersection_id} = Ops.intersection_types(ctx2, str_a_id, str_b_id) +# assert Ops.is_empty_type?(ctx3, intersection_id) +# end +# end +# +# describe "primitive type constructors (any_of_kind)" do +# test "get_primitive_type_any_atom/1" do +# ctx = 
init_context() +# {ctx1, any_atom_id} = Ops.get_primitive_type_any_atom(ctx) +# {ctx2, atom_foo_id} = Ops.create_atom_literal_type(ctx1, :foo) +# +# refute Ops.is_empty_type?(ctx2, any_atom_id) +# # :foo should be a subtype of AnyAtom (i.e., :foo INTERSECTION (NEGATION AnyAtom) == Empty) +# # Or, :foo UNION AnyAtom == AnyAtom +# # Or, :foo INTERSECTION AnyAtom == :foo +# {ctx3, intersection_foo_any_atom_id} = Ops.intersection_types(ctx2, atom_foo_id, any_atom_id) +# assert intersection_foo_any_atom_id == atom_foo_id # Check it simplifies to :foo +# +# # Test original subtype logic: (:foo & (not AnyAtom)) == Empty +# {ctx4, not_any_atom_id} = Ops.negation_type(ctx3, any_atom_id) # Use ctx3 +# {ctx5, intersection_subtype_check_id} = Ops.intersection_types(ctx4, atom_foo_id, not_any_atom_id) +# assert Ops.is_empty_type?(ctx5, intersection_subtype_check_id) +# +# # AnyAtom & AnyInteger should be Empty +# {ctx6, any_integer_id} = Ops.get_primitive_type_any_integer(ctx5) # Use ctx5 +# {ctx7, atom_int_intersect_id} = Ops.intersection_types(ctx6, any_atom_id, any_integer_id) +# assert Ops.is_empty_type?(ctx7, atom_int_intersect_id) +# end +# end +# +# describe "union_types, intersection_types, negation_type" do +# test "basic set properties" do +# ctx0 = init_context() +# {ctx1, type_a_id} = Ops.create_atom_literal_type(ctx0, :a) +# {ctx2, type_b_id} = Ops.create_atom_literal_type(ctx1, :b) +# {ctx3, type_c_id} = Ops.create_atom_literal_type(ctx2, :c) +# {ctx4, nothing_id} = Ops.get_type_nothing(ctx3) +# +# # A | Nothing = A +# {ctx5, union_a_nothing_id} = Ops.union_types(ctx4, type_a_id, nothing_id) +# assert union_a_nothing_id == type_a_id +# +# # A & Nothing = Nothing +# {ctx6, intersect_a_nothing_id} = Ops.intersection_types(ctx5, type_a_id, nothing_id) +# assert intersect_a_nothing_id == nothing_id +# +# # not (not A) = A +# {ctx7, not_a_id} = Ops.negation_type(ctx6, type_a_id) +# {ctx8, not_not_a_id} = Ops.negation_type(ctx7, not_a_id) +# assert not_not_a_id == 
type_a_id +# +# # A | B +# {ctx9, union_ab_id} = Ops.union_types(ctx8, type_a_id, type_b_id) +# # (A | B) & A = A +# {ctx10, intersect_union_a_id} = Ops.intersection_types(ctx9, union_ab_id, type_a_id) +# assert intersect_union_a_id == type_a_id +# +# # (A | B) & C = Nothing (if A, B, C are distinct atom literals) +# {ctx11, intersect_union_c_id} = Ops.intersection_types(ctx10, union_ab_id, type_c_id) +# assert Ops.is_empty_type?(ctx11, intersect_union_c_id) +# +# # Commutativity and idempotence of union/intersection are implicitly tested by caching +# # and canonical key generation in apply_type_op. +# end +# +# test "type operations are cached" do +# ctx0 = init_context() +# {ctx1, type_a_id} = Ops.create_atom_literal_type(ctx0, :a) +# {ctx2, type_b_id} = Ops.create_atom_literal_type(ctx1, :b) +# +# # Perform an operation +# {ctx3, union1_id} = Ops.union_types(ctx2, type_a_id, type_b_id) +# initial_cache_size = map_size(ctx3.type_store.ops_cache) +# assert initial_cache_size > 0 # Ensure something was cached +# +# # Perform the same operation again +# {ctx4, union2_id} = Ops.union_types(ctx3, type_a_id, type_b_id) +# assert union1_id == union2_id +# assert map_size(ctx4.type_store.ops_cache) == initial_cache_size # Cache size should not change +# +# # Perform with swapped arguments (commutative) +# {ctx5, union3_id} = Ops.union_types(ctx4, type_b_id, type_a_id) +# assert union1_id == union3_id +# assert map_size(ctx5.type_store.ops_cache) == initial_cache_size # Cache size should not change +# end +# end +# end diff --git a/test/tilly/type/store_test.exs b/test/tilly/type/store_test.exs index 2b8ec76..a309273 100644 --- a/test/tilly/type/store_test.exs +++ b/test/tilly/type/store_test.exs @@ -1,67 +1,67 @@ -defmodule Tilly.Type.StoreTest do - use ExUnit.Case, async: true - - alias Tilly.BDD - alias Tilly.Type - alias Tilly.Type.Store - - defp init_context do - %{} - |> BDD.init_bdd_store() - |> Store.init_type_store() - end - - describe "init_type_store/1" do - 
test "initializes an empty type store in the typing_ctx" do - typing_ctx = %{} - new_ctx = Store.init_type_store(typing_ctx) - type_store = Map.get(new_ctx, :type_store) - - assert type_store.descrs_by_structure == %{} - assert type_store.structures_by_id == %{} - assert type_store.next_descr_id == 0 - end - end - - describe "get_or_intern_descr/2 and get_descr_by_id/2" do - test "interns a new Descr map and retrieves it" do - typing_ctx = init_context() - descr_map1 = Type.empty_descr(typing_ctx) # Uses canonical BDD.false_node_id() - - # Intern first time - {ctx1, id1} = Store.get_or_intern_descr(typing_ctx, descr_map1) - assert id1 == 0 - assert Store.get_descr_by_id(ctx1, id1) == descr_map1 - assert ctx1.type_store.next_descr_id == 1 - - # Retrieve existing - {ctx2, id1_retrieved} = Store.get_or_intern_descr(ctx1, descr_map1) - assert id1_retrieved == id1 - assert ctx2 == ctx1 # Context should not change if already interned - - # Intern a different Descr map - descr_map2 = Type.any_descr(typing_ctx) # Uses canonical BDD.true_node_id() - {ctx3, id2} = Store.get_or_intern_descr(ctx2, descr_map2) - assert id2 == 1 - assert Store.get_descr_by_id(ctx3, id2) == descr_map2 - assert ctx3.type_store.next_descr_id == 2 - - # Ensure original is still retrievable - assert Store.get_descr_by_id(ctx3, id1) == descr_map1 - end - - test "get_descr_by_id returns nil for non-existent ID" do - typing_ctx = init_context() - assert Store.get_descr_by_id(typing_ctx, 999) == nil - end - - test "raises an error if type store is not initialized" do - uninitialized_ctx = %{} - descr_map = Type.empty_descr(uninitialized_ctx) - - assert_raise ArgumentError, - "Type store not initialized in typing_ctx. 
Call init_type_store first.", - fn -> Store.get_or_intern_descr(uninitialized_ctx, descr_map) end - end - end -end +# defmodule Tilly.Type.StoreTest do +# use ExUnit.Case, async: true +# +# alias Tilly.BDD +# alias Tilly.Type +# alias Tilly.Type.Store +# +# defp init_context do +# %{} +# |> BDD.init_bdd_store() +# |> Store.init_type_store() +# end +# +# describe "init_type_store/1" do +# test "initializes an empty type store in the typing_ctx" do +# typing_ctx = %{} +# new_ctx = Store.init_type_store(typing_ctx) +# type_store = Map.get(new_ctx, :type_store) +# +# assert type_store.descrs_by_structure == %{} +# assert type_store.structures_by_id == %{} +# assert type_store.next_descr_id == 0 +# end +# end +# +# describe "get_or_intern_descr/2 and get_descr_by_id/2" do +# test "interns a new Descr map and retrieves it" do +# typing_ctx = init_context() +# descr_map1 = Type.empty_descr(typing_ctx) # Uses canonical BDD.false_node_id() +# +# # Intern first time +# {ctx1, id1} = Store.get_or_intern_descr(typing_ctx, descr_map1) +# assert id1 == 0 +# assert Store.get_descr_by_id(ctx1, id1) == descr_map1 +# assert ctx1.type_store.next_descr_id == 1 +# +# # Retrieve existing +# {ctx2, id1_retrieved} = Store.get_or_intern_descr(ctx1, descr_map1) +# assert id1_retrieved == id1 +# assert ctx2 == ctx1 # Context should not change if already interned +# +# # Intern a different Descr map +# descr_map2 = Type.any_descr(typing_ctx) # Uses canonical BDD.true_node_id() +# {ctx3, id2} = Store.get_or_intern_descr(ctx2, descr_map2) +# assert id2 == 1 +# assert Store.get_descr_by_id(ctx3, id2) == descr_map2 +# assert ctx3.type_store.next_descr_id == 2 +# +# # Ensure original is still retrievable +# assert Store.get_descr_by_id(ctx3, id1) == descr_map1 +# end +# +# test "get_descr_by_id returns nil for non-existent ID" do +# typing_ctx = init_context() +# assert Store.get_descr_by_id(typing_ctx, 999) == nil +# end +# +# test "raises an error if type store is not initialized" do +# 
uninitialized_ctx = %{} +# descr_map = Type.empty_descr(uninitialized_ctx) +# +# assert_raise ArgumentError, +# "Type store not initialized in typing_ctx. Call init_type_store first.", +# fn -> Store.get_or_intern_descr(uninitialized_ctx, descr_map) end +# end +# end +# end diff --git a/test/tilly/type_test.exs b/test/tilly/type_test.exs index 7c05fed..30fd47a 100644 --- a/test/tilly/type_test.exs +++ b/test/tilly/type_test.exs @@ -1,39 +1,39 @@ -defmodule Tilly.TypeTest do - use ExUnit.Case, async: true - - alias Tilly.BDD - alias Tilly.Type - - describe "empty_descr/1" do - test "returns a Descr map with all BDD IDs pointing to false" do - typing_ctx = BDD.init_bdd_store(%{}) - descr = Type.empty_descr(typing_ctx) - false_id = BDD.false_node_id() - - assert descr.atoms_bdd_id == false_id - assert descr.integers_bdd_id == false_id - assert descr.strings_bdd_id == false_id - assert descr.pairs_bdd_id == false_id - assert descr.records_bdd_id == false_id - assert descr.functions_bdd_id == false_id - assert descr.absent_marker_bdd_id == false_id - end - end - - describe "any_descr/1" do - test "returns a Descr map with BDD IDs pointing to true (and absent_marker to false)" do - typing_ctx = BDD.init_bdd_store(%{}) - descr = Type.any_descr(typing_ctx) - true_id = BDD.true_node_id() - false_id = BDD.false_node_id() - - assert descr.atoms_bdd_id == true_id - assert descr.integers_bdd_id == true_id - assert descr.strings_bdd_id == true_id - assert descr.pairs_bdd_id == true_id - assert descr.records_bdd_id == true_id - assert descr.functions_bdd_id == true_id - assert descr.absent_marker_bdd_id == false_id - end - end -end +# defmodule Tilly.TypeTest do +# use ExUnit.Case, async: true +# +# alias Tilly.BDD +# alias Tilly.Type +# +# describe "empty_descr/1" do +# test "returns a Descr map with all BDD IDs pointing to false" do +# typing_ctx = BDD.init_bdd_store(%{}) +# descr = Type.empty_descr(typing_ctx) +# false_id = BDD.false_node_id() +# +# assert descr.atoms_bdd_id 
== false_id +# assert descr.integers_bdd_id == false_id +# assert descr.strings_bdd_id == false_id +# assert descr.pairs_bdd_id == false_id +# assert descr.records_bdd_id == false_id +# assert descr.functions_bdd_id == false_id +# assert descr.absent_marker_bdd_id == false_id +# end +# end +# +# describe "any_descr/1" do +# test "returns a Descr map with BDD IDs pointing to true (and absent_marker to false)" do +# typing_ctx = BDD.init_bdd_store(%{}) +# descr = Type.any_descr(typing_ctx) +# true_id = BDD.true_node_id() +# false_id = BDD.false_node_id() +# +# assert descr.atoms_bdd_id == true_id +# assert descr.integers_bdd_id == true_id +# assert descr.strings_bdd_id == true_id +# assert descr.pairs_bdd_id == true_id +# assert descr.records_bdd_id == true_id +# assert descr.functions_bdd_id == true_id +# assert descr.absent_marker_bdd_id == false_id +# end +# end +# end